/*	$NetBSD: pf_table.c,v 1.5 2004/11/14 11:12:16 yamt Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.59 2004/07/08 23:17:38 mcbride Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#ifdef __OpenBSD__
#include <netinet/ip_ipsp.h>
#endif
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

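/*
 * Return a pointer to the pf_addr embedded in a sockaddr_union,
 * for the given address family.
 */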
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

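/*
 * All tables live in a global red-black tree, ordered by
 * pfr_ktable_compare() (table name first, then anchor).
 */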
RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

#ifdef _LKM
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
}
#endif

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
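	/*
	 * Commit phase: with PFR_FLAG_DUMMY the operation was only
	 * simulated; otherwise link the new entries into the table,
	 * at splsoftnet() if PFR_FLAG_ATOMIC was given.
	 */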
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xdel = 0, log = 1;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here.
	 * With:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad)))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0, xdel = 0,
				 xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
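	/*
	 * Mark and sweep: all current entries were unmarked above; the
	 * loop below marks those that are to stay, and everything still
	 * unmarked afterwards is collected into delq for removal.
	 */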
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

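	/*
	 * Report a per-address verdict in pfra_fback; with
	 * PFR_FLAG_REPLACE, each address is also overwritten with the
	 * entry that actually matched it.
	 */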
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0 /* XXX gcc */;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

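/*
 * Validate an address/prefix coming from userland: the prefix length
 * must fit the address family, all bits beyond the prefix must be
 * zero, and the negation and feedback fields must be sane.
 */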
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

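/*
 * Look up 'ad' in the table. With 'exact', a host lookup will not
 * return a covering network entry, only an entry of the same kind.
 */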
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = (void *)0xdeadb;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}

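/*
 * Build a netmask of 'net' one-bits as a sockaddr of the given family;
 * e.g. pfr_prepare_network(&mask, AF_INET, 24) yields 255.255.255.0.
 */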
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
#ifdef __OpenBSD__
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
#else
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
#endif
	} else
#ifdef __OpenBSD__
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
#else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
#endif
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6,
			    ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

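/*
 * Table-level ioctl backends. Tables are never deleted directly:
 * the new flags are staged in pfrkt_nflags and applied by
 * pfr_setflags_ktables(), which destroys a table once no flag in
 * PFR_TFLAG_SETMASK remains set.
 */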
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

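/*
 * Deleting a table just clears PFR_TFLAG_ACTIVE; the kernel object
 * itself survives while rules or anchors still hold a reference.
 */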
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0 /* XXX gcc */, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

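/*
 * Reset table statistics; with PFR_FLAG_ADDRSTOO the per-address
 * counters are cleared as well.
 */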
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0 /* XXX gcc */, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

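/*
 * pfr_ina_begin/define/rollback/commit implement transactional table
 * loading: addresses are staged in a detached shadow table tied to the
 * ticket handed out by pfr_ina_begin(), and only become visible when
 * pfr_ina_commit() swaps or merges them into the active table.
 */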
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0 /* XXX gcc */, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

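/*
 * Fold a committed shadow table into its active counterpart. Three
 * cases: the shadow carries no addresses (table definition only), the
 * active table already has addresses (merge entry by entry, keeping
 * the statistics of unchanged entries), or it has none (simply swap
 * the radix heads).
 */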
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

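/*
 * Number of tables visible through 'filter': all of them with
 * PFR_FLAG_ALLRSETS, otherwise those of the given anchor (-1 if that
 * ruleset does not exist).
 */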
int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

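/*
 * Allocate a ktable with one radix head per address family; with
 * 'attachruleset' it is also accounted to its pf_ruleset.
 */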
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

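/*
 * Update table/entry counters for a filtered packet. 'dir_out' and
 * 'op_pass' index the packets[]/bytes[] arrays; when the lookup
 * disagrees with the rule's notion of a match, the packet is counted
 * under PFR_OP_XPASS instead.
 */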
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

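/*
 * Round-robin address selection for pools backed by a table: *pidx is
 * the index of the current block, 'counter' the last address handed
 * out; nested blocks are skipped by advancing the counter past them.
 */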
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = (void *)0xdeadb;
	struct pf_addr		*addr = (void *)0xdeadb;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}