/*	$NetBSD: pf_table.c,v 1.7 2005/12/11 12:24:25 christos Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.62 2004/12/07 18:02:04 mcbride Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#ifdef __OpenBSD__
#include <netinet/ip_ipsp.h>
#endif
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

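/*
 * Address-family helpers: SUNION2PF() returns a pf_addr pointer into a
 * sockaddr_union for the given family, AF_BITS() is the prefix width of
 * the family, and the *_NETWORK() tests are true when an entry describes
 * a network (prefix shorter than a full host address) rather than a
 * single host.
 */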
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

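/*
 * A single walker argument is shared by all radix-tree traversals; the
 * pfrw_op member selects what pfr_walktree() does with each node, and
 * the union carries the per-operation destination (an output buffer, a
 * work queue, or a dynaddr to update).
 */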
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

#ifdef _LKM
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

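/*
 * Add the given addresses to a table.  A throwaway table (tmpkt) is used
 * to detect duplicates within the request itself; entries that survive
 * validation and the duplicate checks are collected on a work queue and
 * only inserted into the real table once the whole request has been
 * parsed (or discarded if PFR_FLAG_DUMMY is set).
 */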
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xdel = 0, log = 1;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad)))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

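/*
 * Replace the contents of a table with the given address list.  Existing
 * entries are marked, the request is diffed against them into three work
 * queues (addq, delq, changeq), and with PFR_FLAG_FEEDBACK the deleted
 * entries are reported back after the caller's own slots, which is why
 * *size2 may grow to size+xdel.
 */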
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0,
				    xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

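/*
 * Test which of the given (host) addresses match the table, without
 * modifying it.  With PFR_FLAG_REPLACE each pfr_addr is overwritten with
 * the entry that matched it; pfra_fback reports match/no-match and
 * whether the matching entry was negated.
 */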
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0 /* XXX gcc */;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

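/*
 * Sanity-check a pfr_addr coming from userland: the prefix length must
 * fit the address family, all address bits beyond the prefix must be
 * zero (both within the last partial byte and in the trailing bytes of
 * the union), pfra_not must be 0 or 1, and pfra_fback must be clear.
 */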
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}


struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = (void *)0xdeadb;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

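/*
 * Allocate and fill a table entry.  Two pools back the allocation: the
 * "intr" pool is meant to be safe for use from interrupt context, and
 * the choice is remembered in pfrke_intrpool so the entry is returned
 * to the right pool on destruction.
 */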
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}

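/*
 * Build a netmask sockaddr for the given family and prefix length.  For
 * IPv4, e.g. net = 20 yields htonl(-1 << 12) = 0xfffff000; for IPv6 the
 * mask is built 32 bits at a time, filling whole words with ones until
 * fewer than 32 bits remain.
 */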
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
#ifdef __OpenBSD__
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
#else
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
#endif
	} else
#ifdef __OpenBSD__
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
#else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
#endif
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

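/*
 * Callback invoked by rn_walktree() for every entry of a table; the
 * pfrw_op field of the walker argument selects marking, sweeping,
 * enqueueing, copying out addresses or stats, picking the n-th entry
 * for the address pools, or updating a dynamic address.
 */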
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6,
			    ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

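/*
 * Create the given tables.  A table defined inside an anchor also needs
 * a root table of the same name in the main ruleset; it is looked up or
 * created here and linked via pfrkt_root.  Tables that already exist but
 * are inactive are reactivated through the changeq path.
 */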
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

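/*
 * Copy out per-table statistics.  Like the other get functions, this
 * uses a two-step size negotiation: if the caller's buffer is too small
 * the required count is returned in *size without copying anything, so
 * userland can retry with a large enough buffer.
 */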
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0 /* XXX gcc */, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0 /* XXX gcc */, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

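/*
 * The pfr_ina_* functions implement the "inactive set" used when a new
 * ruleset is loaded: pfr_ina_begin opens a transaction and hands out a
 * ticket, pfr_ina_define builds shadow tables under that ticket, and
 * pfr_ina_commit (or _rollback) atomically swaps them in (or throws
 * them away).
 */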
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

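/*
 * Define the inactive contents of a table: the addresses are loaded into
 * a freshly created shadow table which is attached to the (possibly
 * newly created) kernel table, to be swapped in by pfr_commit_ktable()
 * when the transaction commits.
 */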
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0 /* XXX gcc */, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

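/*
 * Swap a shadow table into place.  Three cases: the shadow carries no
 * addresses (flags-only change), the live table already has addresses
 * (merge entry by entry, preserving the stats of entries that survive),
 * or the live table is empty (the radix heads are simply swapped).
 */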
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

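/*
 * Apply a new flag word to a table.  A table that is neither referenced
 * nor persistent loses PFR_TFLAG_ACTIVE; one that keeps no flag from
 * PFR_TFLAG_SETMASK is removed from the tree and destroyed outright,
 * dropping the anchor reference on its root table if it had one.
 */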
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

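/*
 * Allocate a kernel table and its two radix heads (one per address
 * family).  With attachruleset set, the table is also accounted against
 * its anchor's ruleset, which keeps the ruleset alive while tables
 * reference it.
 */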
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

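/*
 * Account a packet against the table (and the matching entry, if any).
 * When the match result contradicts what the rule expected (notrule),
 * the traffic is counted in the PFR_OP_XPASS bucket instead of the
 * per-entry counters.
 */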
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

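/*
 * Pick the idx-th usable address from a table for round-robin address
 * pools.  *counter remembers the last address handed out so iteration
 * can resume within a block; when the chosen block contains a nested
 * (more specific) block, the counter is advanced past the nested block
 * and the scan continues until an address that maps back to the chosen
 * block is found.
 */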
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = (void *)0xdeadb;
	struct pf_addr		*addr = (void *)0xdeadb;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}