/*	$OpenBSD: pf_table.c,v 1.143 2022/06/26 11:37:08 mbuhl Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/proc.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_ipsp.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#endif /* INET6 */

#include <net/pfvar.h>
#include <net/pfvar_priv.h>

#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define YIELD(ok)				\
	do {					\
		if (ok)				\
			sched_pause(preempt);	\
	} while (0)

#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)
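
/*
 * SUNION2PF() returns the address inside a sockaddr_union as a
 * struct pf_addr pointer, picking the sin or sin6 member according
 * to the address family; the union provides storage for either view.
 */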
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl[PFRKE_MAX];
struct pool		 pfr_kcounters_pl;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

int			 pfr_gcd(int, int);
void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_lookup_kentry(struct pfr_ktable *,
			    struct pfr_kentry *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
struct pfr_kentry	*pfr_create_kentry_unlocked(struct pfr_addr *, int);
void			 pfr_kentry_kif_ref(struct pfr_kentry *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_ioq(struct pfr_kentryworkq *, int);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, time_t);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, time_t,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *, u_int);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, time_t);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, time_t, int,
			    int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktables_aux(struct pfr_ktableworkq *);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
void			 pfr_ktable_winfo_update(struct pfr_ktable *,
			    struct pfr_kentry *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);
int			 pfr_islinklocal(sa_family_t, struct pf_addr *);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;
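
/*
 * Euclid's algorithm.  Used to maintain the table's pfrkt_gcdweight
 * when load balancing over weighted (PFRKE_COST) entries, e.g.
 * pfr_gcd(6, 9) == 3; note that pfr_gcd(0, n) == n.
 */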
int
pfr_gcd(int m, int n)
{
	int t;

	while (m > 0) {
		t = n % m;
		n = m;
		m = t;
	}
	return (n);
}

void
pfr_initialize(void)
{
	rn_init(sizeof(struct sockaddr_in6));

	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable),
	    0, IPL_SOFTNET, 0, "pfrktable", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_PLAIN], sizeof(struct pfr_kentry),
	    0, IPL_SOFTNET, 0, "pfrke_plain", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_ROUTE], sizeof(struct pfr_kentry_route),
	    0, IPL_SOFTNET, 0, "pfrke_route", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_COST], sizeof(struct pfr_kentry_cost),
	    0, IPL_SOFTNET, 0, "pfrke_cost", NULL);
	pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters),
	    0, IPL_SOFTNET, 0, "pfrkcounters", NULL);

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		if (kt->pfrkt_cnt) {
			DPFPRINTF(LOG_NOTICE,
			    "pfr_clr_addrs: corruption detected (%d).",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

void
pfr_fill_feedback(struct pfr_kentry_all *ke, struct pfr_addr *ad)
{
	ad->pfra_type = ke->pfrke_type;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
		((struct pfr_kentry_cost *)ke)->weight = ad->pfra_weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (ke->pfrke_rifname[0])
			strlcpy(ad->pfra_ifname, ke->pfrke_rifname, IFNAMSIZ);
		break;
	}

	switch (ke->pfrke_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
#ifdef INET6
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ke->pfrke_af);
	}
	ad->pfra_weight = ((struct pfr_kentry_cost *)ke)->weight;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	if (ke->pfrke_flags & PFRKE_FLAG_NOT)
		ad->pfra_not = 1;
	ad->pfra_fback = ke->pfrke_fb;
}
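
/*
 * pfr_add_addrs() works in two phases: entries are validated and
 * pre-allocated on the ioq without any lock held, then inserted into
 * the table under the net and pf locks.  The throwaway table tmpkt
 * catches duplicates within the request itself.
 */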
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq, ioq;
	struct pfr_kentry	*p, *q, *ke;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	SLIST_INIT(&ioq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);

		ke = pfr_create_kentry_unlocked(&ad, flags);
		if (ke == NULL)
			senderr(ENOMEM);
		ke->pfrke_fb = PFR_FB_NONE;
		SLIST_INSERT_HEAD(&ioq, ke, pfrke_ioq);
	}

	NET_LOCK();
	PF_LOCK();
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		PF_UNLOCK();
		NET_UNLOCK();
		senderr(ESRCH);
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		PF_UNLOCK();
		NET_UNLOCK();
		senderr(EPERM);
	}
	SLIST_FOREACH(ke, &ioq, pfrke_ioq) {
		pfr_kentry_kif_ref(ke);
		p = pfr_lookup_kentry(kt, ke, 1);
		q = pfr_lookup_kentry(tmpkt, ke, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ke->pfrke_fb = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ke->pfrke_fb = PFR_FB_ADDED;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    (ke->pfrke_flags & PFRKE_FLAG_NOT))
				ke->pfrke_fb = PFR_FB_CONFLICT;
			else
				ke->pfrke_fb = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			if (pfr_route_kentry(tmpkt, ke)) {
				/* defer destroy after feedback is processed */
				ke->pfrke_fb = PFR_FB_NONE;
			} else {
				/*
				 * mark entry as added to the table, so we
				 * won't kill it with the rest of the ioq
				 */
				ke->pfrke_fb = PFR_FB_ADDED;
				SLIST_INSERT_HEAD(&workq, ke, pfrke_workq);
				xadd++;
			}
		}
	}
	/* remove entries which we will insert from tmpkt */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);

	PF_UNLOCK();
	NET_UNLOCK();

	if (flags & PFR_FLAG_FEEDBACK) {
		i = 0;
		while ((ke = SLIST_FIRST(&ioq)) != NULL) {
			YIELD(flags & PFR_FLAG_USERIOCTL);
			pfr_fill_feedback((struct pfr_kentry_all *)ke, &ad);
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
			SLIST_REMOVE_HEAD(&ioq, pfrke_ioq);
			switch (ke->pfrke_fb) {
			case PFR_FB_CONFLICT:
			case PFR_FB_DUPLICATE:
			case PFR_FB_NONE:
				pfr_destroy_kentry(ke);
				break;
			case PFR_FB_ADDED:
				if (flags & PFR_FLAG_DUMMY)
					pfr_destroy_kentry(ke);
			}
		}
	} else
		pfr_destroy_ioq(&ioq, flags);

	if (nadd != NULL)
		*nadd = xadd;

	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_destroy_ioq(&ioq, flags);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
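
/*
 * pfr_del_addrs() chooses between a full-table mark pass (O(N)) and
 * per-address lookups (O(n*log(N))) using a rough log2 estimate of
 * the table size: for a table of e.g. 1000 entries, log works out to
 * 11, so the full scan is taken once more than 1000/11 ~= 90
 * deletions are requested.
 */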
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			YIELD(flags & PFR_FLAG_USERIOCTL);
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_flags &= ~PFRKE_FLAG_MARK;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_flags & PFRKE_FLAG_MARK)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL &&
		    (p->pfrke_flags & PFRKE_FLAG_NOT) == ad.pfra_not &&
		    !(p->pfrke_flags & PFRKE_FLAG_MARK)) {
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
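
/*
 * pfr_set_addrs() replaces the table contents with the given list:
 * live entries are marked, incoming addresses are sorted into addq
 * (new), changeq (negation flipped) or skipped as duplicates, and
 * whatever remains unmarked afterwards ends up on delq.
 */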
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_flags & PFRKE_FLAG_MARK) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			if ((p->pfrke_flags & PFRKE_FLAG_NOT) != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
				goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			ad.pfra_fback = PFR_FB_ADDED;
			xadd++;
			if (p->pfrke_type == PFRKE_COST)
				kt->pfrkt_refcntcost++;
			pfr_ktable_winfo_update(kt, p);
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
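
/*
 * pfr_tst_addrs() reports which of the given addresses match the
 * table; negated entries yield PFR_FB_NOTMATCH and do not count
 * towards *nmatch, and PFR_FLAG_REPLACE copies the matching entry
 * back to the caller.
 */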
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    ((p->pfrke_flags & PFRKE_FLAG_NOT) ?
		    PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !(p->pfrke_flags & PFRKE_FLAG_NOT))
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_addrs: corruption detected (%d)", w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
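
/*
 * Like pfr_get_addrs() above, pfr_get_astats() uses *size as an
 * in/out parameter: if the table holds more entries than the caller
 * made room for, only the required count is written back, so the
 * caller can retry with a larger buffer.  With PFR_FLAG_CLSTATS the
 * counters are zeroed in the same pass they are read.
 */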
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	time_t			 tzero = gettime();

	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_astats: corruption detected (%d)", w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, gettime(), 0);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback != PFR_FB_NONE)
		return (-1);
	if (ad->pfra_type >= PFRKE_MAX)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			DPFPRINTF(LOG_ERR,
			    "pfr_enqueue_addrs: IPv4 walktree failed.");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			DPFPRINTF(LOG_ERR,
			    "pfr_enqueue_addrs: IPv6 walktree failed.");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		DPFPRINTF(LOG_ERR,
		    "pfr_mark_addrs: IPv4 walktree failed.");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		DPFPRINTF(LOG_ERR,
		    "pfr_mark_addrs: IPv6 walktree failed.");
}
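
/*
 * Entries live in two radix trees, one per address family.  Network
 * entries (prefix shorter than the AF's full width) are looked up
 * with an explicit mask via rn_lookup(); host entries go through
 * rn_match(), with network results rejected for exact lookups.
 */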
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;

	bzero(&sa, sizeof(sa));
	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_lookup_kentry(struct pfr_ktable *kt, struct pfr_kentry *key, int exact)
{
	union sockaddr_union	 mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;

	switch (key->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(key->pfrke_af);
	}
	if (KENTRY_NETWORK(key)) {
		pfr_prepare_network(&mask, key->pfrke_af, key->pfrke_net);
		ke = (struct pfr_kentry *)rn_lookup(&key->pfrke_sa, &mask,
		    head);
	} else {
		ke = (struct pfr_kentry *)rn_match(&key->pfrke_sa, head);
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry_all	*ke;

	if (ad->pfra_type >= PFRKE_MAX)
		panic("unknown pfra_type %d", ad->pfra_type);

	ke = pool_get(&pfr_kentry_pl[ad->pfra_type], PR_NOWAIT | PR_ZERO);
	if (ke == NULL)
		return (NULL);

	ke->pfrke_type = ad->pfra_type;

	/* set weight allowing implicit weights */
	if (ad->pfra_weight == 0)
		ad->pfra_weight = 1;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
		((struct pfr_kentry_cost *)ke)->weight = ad->pfra_weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (ad->pfra_ifname[0])
			ke->pfrke_rkif = pfi_kif_get(ad->pfra_ifname, NULL);
		if (ke->pfrke_rkif)
			pfi_kif_ref(ke->pfrke_rkif, PFI_KIF_REF_ROUTE);
		break;
	}

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	if (ad->pfra_not)
		ke->pfrke_flags |= PFRKE_FLAG_NOT;
	return ((struct pfr_kentry *)ke);
}
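
/*
 * Unlocked variant of pfr_create_kentry(): it may sleep for memory
 * on user ioctls and only records the route interface by name; the
 * pfi_kif reference is taken later, under the lock, by
 * pfr_kentry_kif_ref().
 */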
struct pfr_kentry *
pfr_create_kentry_unlocked(struct pfr_addr *ad, int flags)
{
	struct pfr_kentry_all	*ke;
	int			 mflags = PR_ZERO;

	if (ad->pfra_type >= PFRKE_MAX)
		panic("unknown pfra_type %d", ad->pfra_type);

	if (flags & PFR_FLAG_USERIOCTL)
		mflags |= PR_WAITOK;
	else
		mflags |= PR_NOWAIT;

	ke = pool_get(&pfr_kentry_pl[ad->pfra_type], mflags);
	if (ke == NULL)
		return (NULL);

	ke->pfrke_type = ad->pfra_type;

	/* set weight allowing implicit weights */
	if (ad->pfra_weight == 0)
		ad->pfra_weight = 1;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
		((struct pfr_kentry_cost *)ke)->weight = ad->pfra_weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (ad->pfra_ifname[0])
			(void) strlcpy(ke->pfrke_rifname, ad->pfra_ifname,
			    IFNAMSIZ);
		break;
	}

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	if (ad->pfra_not)
		ke->pfrke_flags |= PFRKE_FLAG_NOT;
	return ((struct pfr_kentry *)ke);
}

void
pfr_kentry_kif_ref(struct pfr_kentry *ke_all)
{
	struct pfr_kentry_all	*ke = (struct pfr_kentry_all *)ke_all;

	NET_ASSERT_LOCKED();
	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
	case PFRKE_ROUTE:
		if (ke->pfrke_rifname[0])
			ke->pfrke_rkif = pfi_kif_get(ke->pfrke_rifname, NULL);
		if (ke->pfrke_rkif)
			pfi_kif_ref(ke->pfrke_rkif, PFI_KIF_REF_ROUTE);
		break;
	}
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	while ((p = SLIST_FIRST(workq)) != NULL) {
		YIELD(1);
		SLIST_REMOVE_HEAD(workq, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_ioq(struct pfr_kentryworkq *ioq, int flags)
{
	struct pfr_kentry	*p;

	while ((p = SLIST_FIRST(ioq)) != NULL) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		SLIST_REMOVE_HEAD(ioq, pfrke_ioq);
		/*
		 * we only destroy those entries which did not make it
		 * into the table
		 */
		if ((p->pfrke_fb != PFR_FB_ADDED) || (flags & PFR_FLAG_DUMMY))
			pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
	if (ke->pfrke_type == PFRKE_COST || ke->pfrke_type == PFRKE_ROUTE)
		pfi_kif_unref(((struct pfr_kentry_all *)ke)->pfrke_rkif,
		    PFI_KIF_REF_ROUTE);
	pool_put(&pfr_kentry_pl[ke->pfrke_type], ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			DPFPRINTF(LOG_ERR,
			    "pfr_insert_kentries: cannot route entry "
			    "(code=%d).", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		++n;
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost++;
		pfr_ktable_winfo_update(kt, p);
		YIELD(1);
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	if (p->pfrke_type == PFRKE_COST)
		kt->pfrkt_refcntcost++;
	kt->pfrkt_cnt++;
	pfr_ktable_winfo_update(kt, p);

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	struct pfr_kentryworkq	 addrq;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		++n;
		YIELD(1);
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost--;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);

	/* update maxweight and gcd for load balancing */
	if (kt->pfrkt_refcntcost > 0) {
		kt->pfrkt_gcdweight = 0;
		kt->pfrkt_maxweight = 1;
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		SLIST_FOREACH(p, &addrq, pfrke_workq)
			pfr_ktable_winfo_update(kt, p);
	}
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
	}
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, time_t tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_flags ^= PFRKE_FLAG_NOT;
		if (p->pfrke_counters) {
			pool_put(&pfr_kcounters_pl, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}
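
/*
 * Build a sockaddr mask for the given prefix length; e.g. net = 20
 * with AF_INET yields 0xfffff000 (255.255.240.0).  For AF_INET6 the
 * mask is filled in 32 bits at a time across the four s6_addr32
 * words.
 */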
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
#ifdef INET6
	int	i;
#endif /* INET6 */

	bzero(sa, sizeof(*sa));
	switch (af) {
	case AF_INET:
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
		break;
#ifdef INET6
	case AF_INET6:
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	switch (ke->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	switch (ke->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);

	if (rn == NULL) {
		DPFPRINTF(LOG_ERR, "pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_type = ke->pfrke_type;
	if (ke->pfrke_flags & PFRKE_FLAG_NOT)
		ad->pfra_not = 1;

	switch (ad->pfra_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
#ifdef INET6
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ke->pfrke_counters != NULL)
		ad->pfra_states = ke->pfrke_counters->states;
	switch (ke->pfrke_type) {
	case PFRKE_COST:
		ad->pfra_weight = ((struct pfr_kentry_cost *)ke)->weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (((struct pfr_kentry_route *)ke)->kif != NULL)
			strlcpy(ad->pfra_ifname,
			    ((struct pfr_kentry_route *)ke)->kif->pfik_name,
			    IFNAMSIZ);
		break;
	default:
		break;
	}
}
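
/*
 * pfr_walktree() is the single callback handed to rn_walktree() for
 * both radix trees; w->pfrw_op selects the action applied to each
 * entry: mark/sweep, enqueue, copy out addresses or stats, fetch a
 * pool entry by index, or refresh a dynamic address.
 */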
int
pfr_walktree(struct radix_node *rn, void *arg, u_int id)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	union sockaddr_union	 mask;
	int			 flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_flags &= ~PFRKE_FLAG_MARK;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_flags & PFRKE_FLAG_MARK)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets,
				    sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets,
				    sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_flags & PFRKE_FLAG_NOT)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		switch (ke->pfrke_af) {
		case AF_INET:
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &mask, AF_INET);
			break;
#ifdef INET6
		case AF_INET6:
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &mask, AF_INET6);
			break;
#endif /* INET6 */
		default:
			unhandled_af(ke->pfrke_af);
		}
		break;
	}
	return (0);
}
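
/*
 * Tables are torn down indirectly: callers compute new flags in
 * pfrkt_nflags and pfr_setflags_ktable() destroys the table once no
 * flag in PFR_TFLAG_SETMASK remains set.
 */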
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq, auxq;
	struct pfr_ktable	*p, *q, *r, *n, *w, key;
	int			 i, rv, xadd = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	SLIST_INIT(&auxq);
	/* pre-allocate all memory outside of locks */
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = pfr_create_ktable(&key.pfrkt_t, tzero, 0,
		    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
		if (p == NULL)
			senderr(ENOMEM);

		/*
		 * Note: we also pre-allocate a root table here. We keep it
		 * at ->pfrkt_root, which we must not forget about.
		 */
		key.pfrkt_flags = 0;
		memset(key.pfrkt_anchor, 0, sizeof(key.pfrkt_anchor));
		p->pfrkt_root = pfr_create_ktable(&key.pfrkt_t, 0, 0,
		    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
		if (p->pfrkt_root == NULL) {
			pfr_destroy_ktable(p, 0);
			senderr(ENOMEM);
		}

		SLIST_FOREACH(q, &auxq, pfrkt_workq) {
			if (!pfr_ktable_compare(p, q)) {
				/*
				 * No lock is needed here: `p` is empty,
				 * with no rules or shadow tables attached.
				 */
				pfr_destroy_ktable(p->pfrkt_root, 0);
				p->pfrkt_root = NULL;
				pfr_destroy_ktable(p, 0);
				p = NULL;
				break;
			}
		}
		if (q != NULL)
			continue;

		SLIST_INSERT_HEAD(&auxq, p, pfrkt_workq);
	}

	/*
	 * auxq contains freshly allocated tables with no dups.
	 * also note there are no rulesets attached, because
	 * the attach operation requires PF_LOCK().
	 */
	NET_LOCK();
	PF_LOCK();
	SLIST_FOREACH_SAFE(n, &auxq, pfrkt_workq, w) {
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, n);
		if (p == NULL) {
			SLIST_REMOVE(&auxq, n, pfr_ktable, pfrkt_workq);
			SLIST_INSERT_HEAD(&addq, n, pfrkt_workq);
			xadd++;
		} else if (!(flags & PFR_FLAG_DUMMY) &&
		    !(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		/*
		 * addq contains tables we have to insert and attach
		 * rules to
		 *
		 * changeq contains tables we need to update
		 *
		 * auxq contains pre-allocated tables we won't use;
		 * we must free them
		 */
		SLIST_FOREACH_SAFE(p, &addq, pfrkt_workq, w) {
			p->pfrkt_rs = pf_find_or_create_ruleset(
			    p->pfrkt_anchor);
			if (p->pfrkt_rs == NULL) {
				xadd--;
				SLIST_REMOVE(&addq, p, pfr_ktable, pfrkt_workq);
				SLIST_INSERT_HEAD(&auxq, p, pfrkt_workq);
				continue;
			}
			p->pfrkt_rs->tables++;

			if (!p->pfrkt_anchor[0]) {
				q = p->pfrkt_root;
				p->pfrkt_root = NULL;
				SLIST_INSERT_HEAD(&auxq, q, pfrkt_workq);
				continue;
			}

			/* use pre-allocated root table as a key */
			q = p->pfrkt_root;
			p->pfrkt_root = NULL;
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, q);
			if (r != NULL) {
				p->pfrkt_root = r;
				SLIST_INSERT_HEAD(&auxq, q, pfrkt_workq);
				continue;
			}
			/*
			 * we may have created the root table in an
			 * earlier iteration; such a table can exist
			 * in addq only.
			 */
			SLIST_FOREACH(r, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(r, q)) {
					/*
					 * `r` is the root table we found
					 * earlier; `q` can be dropped.
					 */
					p->pfrkt_root = r;
					SLIST_INSERT_HEAD(&auxq, q,
					    pfrkt_workq);
					break;
				}
			}
			if (r != NULL)
				continue;

			q->pfrkt_rs = pf_find_or_create_ruleset(q->pfrkt_anchor);
			/*
			 * root tables are attached to main ruleset,
			 * because ->pfrkt_anchor[0] == '\0'
			 */
			KASSERT(q->pfrkt_rs == &pf_main_ruleset);
			q->pfrkt_rs->tables++;
			p->pfrkt_root = q;
			SLIST_INSERT_HEAD(&addq, q, pfrkt_workq);
		}

		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	}
	PF_UNLOCK();
	NET_UNLOCK();

	pfr_destroy_ktables_aux(&auxq);
	if (flags & PFR_FLAG_DUMMY)
		pfr_destroy_ktables_aux(&addq);

	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables_aux(&auxq);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_tables: corruption detected (%d).", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	time_t			 tzero = gettime();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (n) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_tstats: corruption detected (%d).", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
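
/*
 * Inactive-set transaction: pfr_ina_begin() opens the anchor's
 * ruleset and hands out a ticket, pfr_ina_define() loads addresses
 * into shadow tables flagged PFR_TFLAG_INACTIVE, and a later
 * pfr_ina_commit() or pfr_ina_rollback() with a matching ticket
 * swaps them in or throws them away.
 */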
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1,
		    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1,
		    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0,
	    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost++;
		pfr_ktable_winfo_update(kt, p);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		SLIST_FOREACH_SAFE(p, &workq, pfrkt_workq, q) {
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
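
/*
 * Merge a shadow table into its live counterpart.  A shadow defined
 * without addresses (NO_ADDRESSES) only clears the stats; if the
 * live table is active, the shadow entries are diffed against the
 * marked live ones into add, delete and change queues; otherwise the
 * radix heads are simply swapped wholesale.
 */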
void
pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		while ((p = SLIST_FIRST(&addrq)) != NULL) {
			SLIST_REMOVE_HEAD(&addrq, pfrke_workq);
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if ((q->pfrke_flags & PFRKE_FLAG_NOT) !=
				    (p->pfrke_flags & PFRKE_FLAG_NOT))
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_flags |= PFRKE_FLAG_MARK;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove leading slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t	 siz = MAXPATHLEN;
	int	 i;

	if (anchor[0] == '/') {
		char	*path;
		int	 off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
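
/*
 * Count the tables visible through the given filter: every ruleset
 * with PFR_FLAG_ALLRSETS, a single anchor when one is named, or the
 * main ruleset otherwise.  Returns -1 if the named anchor does not
 * exist.
 */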
int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable *p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable *p, *q;

	SLIST_FOREACH_SAFE(p, workq, pfrkt_workq, q) {
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
{
	struct pfr_ktable *p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}
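
/*
 * Allocate and initialize a kernel table.  With attachruleset set the
 * table is bound to its ruleset, which must happen under the pf lock;
 * one radix head per address family is always initialized.  Returns
 * NULL if the pool allocation, ruleset attach or radix head setup
 * fails.
 */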
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset,
    int wait)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, wait|PR_ZERO|PR_LIMITFAIL);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		PF_ASSERT_LOCKED();
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr)) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr))) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;
	kt->pfrkt_refcntcost = 0;
	kt->pfrkt_gcdweight = 0;
	kt->pfrkt_maxweight = 1;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable *p;

	while ((p = SLIST_FIRST(workq)) != NULL) {
		SLIST_REMOVE_HEAD(workq, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktables_aux(struct pfr_ktableworkq *auxq)
{
	struct pfr_ktable *p;

	while ((p = SLIST_FIRST(auxq)) != NULL) {
		SLIST_REMOVE_HEAD(auxq, pfrkt_workq);
		/*
		 * There must be no extra data (rules, shadow tables, ...)
		 * attached, because auxq holds just empty memory to be
		 * initialized.  Therefore we can also be called with no lock.
		 */
		if (p->pfrkt_root != NULL) {
			KASSERT(p->pfrkt_root->pfrkt_rs == NULL);
			KASSERT(p->pfrkt_root->pfrkt_shadow == NULL);
			KASSERT(p->pfrkt_root->pfrkt_root == NULL);
			pfr_destroy_ktable(p->pfrkt_root, 0);
			p->pfrkt_root = NULL;
		}
		KASSERT(p->pfrkt_rs == NULL);
		KASSERT(p->pfrkt_shadow == NULL);
		pfr_destroy_ktable(p, 0);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free(kt->pfrkt_ip4, M_RTABLE, sizeof(*kt->pfrkt_ip4));
	if (kt->pfrkt_ip6 != NULL)
		free(kt->pfrkt_ip6, M_RTABLE, sizeof(*kt->pfrkt_ip6));
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
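
/*
 * Packet-path lookup.  pfr_match_addr() reports whether an address is
 * covered by the table, honouring negated ("!") entries and bumping
 * the match/nomatch counters; pfr_kentry_byaddr() does the underlying
 * radix lookup and, with exact set, rejects network entries so that
 * only host entries match.
 */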
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	ke = pfr_kentry_byaddr(kt, a, af, 0);

	match = (ke && !(ke->pfrke_flags & PFRKE_FLAG_NOT));
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;

	return (match);
}

struct pfr_kentry *
pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    int exact)
{
	struct pfr_kentry	*ke = NULL;
	struct sockaddr_in	 tmp4;
#ifdef INET6
	struct sockaddr_in6	 tmp6;
#endif /* INET6 */

	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return (0);

	switch (af) {
	case AF_INET:
		bzero(&tmp4, sizeof(tmp4));
		tmp4.sin_len = sizeof(tmp4);
		tmp4.sin_family = AF_INET;
		tmp4.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&tmp4, kt->pfrkt_ip4);
		break;
#ifdef INET6
	case AF_INET6:
		bzero(&tmp6, sizeof(tmp6));
		tmp6.sin6_len = sizeof(tmp6);
		tmp6.sin6_family = AF_INET6;
		bcopy(a, &tmp6.sin6_addr, sizeof(tmp6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&tmp6, kt->pfrkt_ip6);
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}
	if (exact && ke && KENTRY_NETWORK(ke))
		ke = NULL;

	return (ke);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, struct pf_pdesc *pd,
    int op, int notrule)
{
	struct pfr_kentry	*ke = NULL;
	struct sockaddr_in	 tmp4;
#ifdef INET6
	struct sockaddr_in6	 tmp6;
#endif /* INET6 */
	sa_family_t		 af = pd->af;
	u_int64_t		 len = pd->tot_len;
	int			 dir_idx = (pd->dir == PF_OUT);
	int			 op_idx;

	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return;

	switch (af) {
	case AF_INET:
		bzero(&tmp4, sizeof(tmp4));
		tmp4.sin_len = sizeof(tmp4);
		tmp4.sin_family = AF_INET;
		tmp4.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&tmp4, kt->pfrkt_ip4);
		break;
#ifdef INET6
	case AF_INET6:
		bzero(&tmp6, sizeof(tmp6));
		tmp6.sin6_len = sizeof(tmp6);
		tmp6.sin6_family = AF_INET6;
		bcopy(a, &tmp6.sin6_addr, sizeof(tmp6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&tmp6, kt->pfrkt_ip6);
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	switch (op) {
	case PF_PASS:
		op_idx = PFR_OP_PASS;
		break;
	case PF_MATCH:
		op_idx = PFR_OP_MATCH;
		break;
	case PF_DROP:
		op_idx = PFR_OP_BLOCK;
		break;
	default:
		panic("unhandled op");
	}

	if ((ke == NULL || (ke->pfrke_flags & PFRKE_FLAG_NOT)) != notrule) {
		if (op_idx != PFR_OP_PASS)
			DPFPRINTF(LOG_DEBUG,
			    "pfr_update_stats: assertion failed.");
		op_idx = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_idx][op_idx]++;
	kt->pfrkt_bytes[dir_idx][op_idx] += len;
	if (ke != NULL && op_idx != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
			    PR_NOWAIT | PR_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_idx][op_idx]++;
			ke->pfrke_counters->pfrkc_bytes[dir_idx][op_idx] += len;
		}
	}
}
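
/*
 * Look up or create the table a rule references and take a rule
 * reference on it.  For a table inside an anchor, a root table of the
 * same name is also set up in the main ruleset, so lookups can fall
 * back to it while the anchor table is not active.
 */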
%d.", 2574 kt->pfrkt_refcnt[PFR_REFCNT_RULE]); 2575 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) 2576 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); 2577 } 2578 2579 int 2580 pfr_islinklocal(sa_family_t af, struct pf_addr *addr) 2581 { 2582 #ifdef INET6 2583 if (af == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&addr->v6)) 2584 return (1); 2585 #endif /* INET6 */ 2586 return (0); 2587 } 2588 2589 int 2590 pfr_pool_get(struct pf_pool *rpool, struct pf_addr **raddr, 2591 struct pf_addr **rmask, sa_family_t af) 2592 { 2593 struct pfr_ktable *kt; 2594 struct pfr_kentry *ke, *ke2; 2595 struct pf_addr *addr, *counter; 2596 union sockaddr_union mask; 2597 struct sockaddr_in tmp4; 2598 #ifdef INET6 2599 struct sockaddr_in6 tmp6; 2600 #endif 2601 int startidx, idx = -1, loop = 0, use_counter = 0; 2602 2603 switch (af) { 2604 case AF_INET: 2605 bzero(&tmp4, sizeof(tmp4)); 2606 tmp4.sin_len = sizeof(tmp4); 2607 tmp4.sin_family = AF_INET; 2608 addr = (struct pf_addr *)&tmp4.sin_addr; 2609 break; 2610 #ifdef INET6 2611 case AF_INET6: 2612 bzero(&tmp6, sizeof(tmp6)); 2613 tmp6.sin6_len = sizeof(tmp6); 2614 tmp6.sin6_family = AF_INET6; 2615 addr = (struct pf_addr *)&tmp6.sin6_addr; 2616 break; 2617 #endif /* INET6 */ 2618 default: 2619 unhandled_af(af); 2620 } 2621 2622 if (rpool->addr.type == PF_ADDR_TABLE) 2623 kt = rpool->addr.p.tbl; 2624 else if (rpool->addr.type == PF_ADDR_DYNIFTL) 2625 kt = rpool->addr.p.dyn->pfid_kt; 2626 else 2627 return (-1); 2628 kt = pfr_ktable_select_active(kt); 2629 if (kt == NULL) 2630 return (-1); 2631 2632 counter = &rpool->counter; 2633 idx = rpool->tblidx; 2634 if (idx < 0 || idx >= kt->pfrkt_cnt) 2635 idx = 0; 2636 else 2637 use_counter = 1; 2638 startidx = idx; 2639 2640 _next_block: 2641 if (loop && startidx == idx) { 2642 kt->pfrkt_nomatch++; 2643 return (1); 2644 } 2645 2646 ke = pfr_kentry_byidx(kt, idx, af); 2647 if (ke == NULL) { 2648 /* we don't have this idx, try looping */ 2649 if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) { 2650 kt->pfrkt_nomatch++; 2651 return (1); 2652 } 2653 idx = 0; 2654 loop++; 2655 } 2656 2657 /* Get current weight for weighted round-robin */ 2658 if (idx == 0 && use_counter == 1 && kt->pfrkt_refcntcost > 0) { 2659 rpool->curweight = rpool->curweight - kt->pfrkt_gcdweight; 2660 2661 if (rpool->curweight < 1) 2662 rpool->curweight = kt->pfrkt_maxweight; 2663 } 2664 2665 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net); 2666 *raddr = SUNION2PF(&ke->pfrke_sa, af); 2667 *rmask = SUNION2PF(&pfr_mask, af); 2668 2669 if (use_counter && !PF_AZERO(counter, af)) { 2670 /* is supplied address within block? 
int
pfr_pool_get(struct pf_pool *rpool, struct pf_addr **raddr,
    struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr, *counter;
	union sockaddr_union	 mask;
	struct sockaddr_in	 tmp4;
#ifdef INET6
	struct sockaddr_in6	 tmp6;
#endif
	int			 startidx, idx = -1, loop = 0, use_counter = 0;

	switch (af) {
	case AF_INET:
		bzero(&tmp4, sizeof(tmp4));
		tmp4.sin_len = sizeof(tmp4);
		tmp4.sin_family = AF_INET;
		addr = (struct pf_addr *)&tmp4.sin_addr;
		break;
#ifdef INET6
	case AF_INET6:
		bzero(&tmp6, sizeof(tmp6));
		tmp6.sin6_len = sizeof(tmp6);
		tmp6.sin6_family = AF_INET6;
		addr = (struct pf_addr *)&tmp6.sin6_addr;
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	if (rpool->addr.type == PF_ADDR_TABLE)
		kt = rpool->addr.p.tbl;
	else if (rpool->addr.type == PF_ADDR_DYNIFTL)
		kt = rpool->addr.p.dyn->pfid_kt;
	else
		return (-1);
	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return (-1);

	counter = &rpool->counter;
	idx = rpool->tblidx;
	if (idx < 0 || idx >= kt->pfrkt_cnt)
		idx = 0;
	else
		use_counter = 1;
	startidx = idx;

_next_block:
	if (loop && startidx == idx) {
		kt->pfrkt_nomatch++;
		return (1);
	}

	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		/* we don't have this idx, try looping */
		if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
			kt->pfrkt_nomatch++;
			return (1);
		}
		idx = 0;
		loop++;
	}

	/* Get current weight for weighted round-robin */
	if (idx == 0 && use_counter == 1 && kt->pfrkt_refcntcost > 0) {
		rpool->curweight = rpool->curweight - kt->pfrkt_gcdweight;

		if (rpool->curweight < 1)
			rpool->curweight = kt->pfrkt_maxweight;
	}

	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter && !PF_AZERO(counter, af)) {
		/* is the supplied address within the block? */
		if (!pf_match_addr(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		pf_addrcpy(addr, counter, af);
	} else {
		/* use first address of block */
		pf_addrcpy(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		if (rpool->addr.type == PF_ADDR_DYNIFTL &&
		    pfr_islinklocal(af, addr)) {
			idx++;
			goto _next_block;
		}
		pf_addrcpy(counter, addr, af);
		rpool->tblidx = idx;
		kt->pfrkt_match++;
		rpool->states = 0;
		if (ke->pfrke_counters != NULL)
			rpool->states = ke->pfrke_counters->states;
		switch (ke->pfrke_type) {
		case PFRKE_COST:
			rpool->weight = ((struct pfr_kentry_cost *)ke)->weight;
			/* FALLTHROUGH */
		case PFRKE_ROUTE:
			rpool->kif = ((struct pfr_kentry_route *)ke)->kif;
			break;
		default:
			rpool->weight = 1;
			break;
		}
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&tmp4,
			    kt->pfrkt_ip4);
			break;
#ifdef INET6
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&tmp6,
			    kt->pfrkt_ip6);
			break;
#endif /* INET6 */
		default:
			unhandled_af(af);
		}
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			if (rpool->addr.type == PF_ADDR_DYNIFTL &&
			    pfr_islinklocal(af, addr))
				goto _next_entry;
			pf_addrcpy(counter, addr, af);
			rpool->tblidx = idx;
			kt->pfrkt_match++;
			rpool->states = 0;
			if (ke->pfrke_counters != NULL)
				rpool->states = ke->pfrke_counters->states;
			switch (ke->pfrke_type) {
			case PFRKE_COST:
				rpool->weight =
				    ((struct pfr_kentry_cost *)ke)->weight;
				/* FALLTHROUGH */
			case PFRKE_ROUTE:
				rpool->kif =
				    ((struct pfr_kentry_route *)ke)->kif;
				break;
			default:
				rpool->weight = 1;
				break;
			}
			return (0);
		}
_next_entry:
		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		pf_poolmask(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		pf_addr_inc(addr, af);
		if (!pf_match_addr(0, *raddr, *rmask, addr, af)) {
			/* we reached the end of our main block, */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

/* Added for load balancing state counter use. */
int
pfr_states_increase(struct pfr_ktable *kt, struct pf_addr *addr, int af)
{
	struct pfr_kentry *ke;

	ke = pfr_kentry_byaddr(kt, addr, af, 1);
	if (ke == NULL)
		return (-1);

	if (ke->pfrke_counters == NULL)
		ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
		    PR_NOWAIT | PR_ZERO);
	if (ke->pfrke_counters == NULL)
		return (-1);

	ke->pfrke_counters->states++;
	return (ke->pfrke_counters->states);
}

/* Added for load balancing state counter use. */
int
pfr_states_decrease(struct pfr_ktable *kt, struct pf_addr *addr, int af)
{
	struct pfr_kentry *ke;

	ke = pfr_kentry_byaddr(kt, addr, af, 1);
	if (ke == NULL)
		return (-1);

	if (ke->pfrke_counters == NULL)
		ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
		    PR_NOWAIT | PR_ZERO);
	if (ke->pfrke_counters == NULL)
		return (-1);

	if (ke->pfrke_counters->states > 0)
		ke->pfrke_counters->states--;
	else
		DPFPRINTF(LOG_DEBUG,
		    "pfr_states_decrease: states-- when states <= 0");

	return (ke->pfrke_counters->states);
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	switch (dyn->pfid_af) {
	case AF_UNSPEC:	/* look up both IPv4 and IPv6 addresses */
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		break;
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		break;
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		break;
#endif /* INET6 */
	default:
		unhandled_af(dyn->pfid_af);
	}
}
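
/*
 * Maintain per-table weight summaries as entries are added: the
 * running gcd and maximum of all entry weights.  pfr_pool_get() above
 * steps rpool->curweight down by the gcd and wraps at maxweight, so
 * every weight level is visited in turn.
 */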
void
pfr_ktable_winfo_update(struct pfr_ktable *kt, struct pfr_kentry *p)
{
	/*
	 * If the cost flag is set, gcdweight is needed for round-robin.
	 */
	if (kt->pfrkt_refcntcost > 0) {
		u_int16_t weight;

		weight = (p->pfrke_type == PFRKE_COST) ?
		    ((struct pfr_kentry_cost *)p)->weight : 1;

		if (kt->pfrkt_gcdweight == 0)
			kt->pfrkt_gcdweight = weight;

		kt->pfrkt_gcdweight =
		    pfr_gcd(weight, kt->pfrkt_gcdweight);

		if (kt->pfrkt_maxweight < weight)
			kt->pfrkt_maxweight = weight;
	}
}

struct pfr_ktable *
pfr_ktable_select_active(struct pfr_ktable *kt)
{
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (NULL);

	return (kt);
}
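
/*
 * Illustrative example (not part of the original code): with two
 * PFRKE_COST entries of weight 4 and 6, pfr_ktable_winfo_update()
 * leaves pfrkt_gcdweight = 2 and pfrkt_maxweight = 6, so pfr_pool_get()
 * cycles rpool->curweight 6 -> 4 -> 2 -> 6 -> ...  How curweight is
 * compared against individual entry weights is decided by the
 * load-balancing callers outside this file.
 */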