/*	$NetBSD: pf_table.c,v 1.13 2007/12/11 11:08:22 lukem Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.62 2004/12/07 18:02:04 mcbride Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_table.c,v 1.13 2007/12/11 11:08:22 lukem Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#ifdef __OpenBSD__
#include <netinet/ip_ipsp.h>
#endif
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

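/*
 * SUNION2PF() returns the address inside a sockaddr_union as a
 * struct pf_addr pointer for the given family; the *_NETWORK()
 * macros flag entries whose prefix is shorter than the full address
 * width (32 or 128 bits) and therefore describe networks rather
 * than single hosts.
 */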
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

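/*
 * All tables, active and inactive, live in a single red-black tree
 * keyed on (name, anchor); see pfr_ktable_compare() below.
 */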
RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
#ifdef __NetBSD__
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr, IPL_NONE);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr, IPL_NONE);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL, IPL_SOFTNET);
#else
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);
#endif

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

#ifdef _LKM
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

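/*
 * Add the given addresses to the table.  A scratch table (tmpkt) is
 * used to detect duplicates within the input itself, so feedback can
 * distinguish PFR_FB_DUPLICATE from PFR_FB_ADDED.
 */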
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

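/*
 * Delete the given addresses from the table.  The relevant entries
 * are first unmarked (via a full-table walk or per-address lookups,
 * whichever is cheaper), then marked again as they are collected
 * for deletion, so duplicates in the input show up as
 * PFR_FB_DUPLICATE.
 */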
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xdel = 0, log = 1;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here.
	 * With:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad)))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

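/*
 * Replace the table contents with the given address list: existing
 * entries are marked, input addresses are added or have their "not"
 * flag toggled as needed, and whatever is left unmarked afterwards
 * is deleted.  With PFR_FLAG_FEEDBACK and a large enough buffer,
 * the deleted entries are reported back after the input.
 */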
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0, xdel = 0,
				 xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

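/*
 * Test which of the given addresses match the table.  With
 * PFR_FLAG_REPLACE, each input address is overwritten by the entry
 * that matched it; network entries in the input are rejected.
 */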
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

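/*
 * Like pfr_get_addrs(), but also copies out the per-entry packet
 * and byte counters.  PFR_FLAG_CLSTATS would additionally zero the
 * counters after reading them, but is currently disabled (XXX).
 */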
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0 /* XXX gcc */;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

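/*
 * Sanity-check a pfr_addr coming from userland: the prefix length
 * must fit the address family, all bits beyond the prefix must be
 * zero, and the "not" and feedback fields must be clean.
 */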
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((char *)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((char *)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

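/*
 * Look up an address in the table's radix trees.  Networks use an
 * exact rn_lookup() on address and mask; single addresses use
 * rn_match(), where the "exact" flag additionally rejects a network
 * entry that merely contains the address.
 */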
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = (void *)0xdeadb;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}

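/*
 * Build a netmask sockaddr for the given family and prefix length:
 * "net" leading one-bits, the rest zero.  For IPv6 the mask is
 * filled in 32 bits at a time.
 */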
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

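/*
 * Callback for rn_walktree(), dispatching on the operation stored
 * in the pfr_walktree cookie: marking/sweeping entries, collecting
 * them on a work queue, copying them out to userland, or picking
 * an address for pfr_pool_get()/pfr_dynaddr_update().
 */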
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

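/*
 * The functions below operate on whole tables rather than on
 * addresses; they work against the global red-black tree of
 * ktables and use pfrkt_nflags to stage flag changes that
 * pfr_setflags_ktable() applies afterwards.
 */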
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

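/*
 * pfr_get_tables() and pfr_get_tstats() implement the usual two-pass
 * size negotiation: if the caller's buffer is too small, only the
 * required size is reported back and nothing is copied out.
 */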
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0 /* XXX gcc */, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0 /* XXX gcc */, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

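/*
 * The pfr_ina_*() functions manage the "inactive set" used while a
 * new ruleset is being loaded: pfr_ina_begin() opens a ticketed
 * transaction, pfr_ina_define() stages tables and their addresses
 * as shadows, and pfr_ina_commit()/pfr_ina_rollback() atomically
 * install or discard them.
 */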
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0 /* XXX gcc */, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

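/*
 * Make a staged shadow table the active one.  Three cases: the
 * shadow carries no addresses (only stats are reset), the target
 * table may already contain addresses (merge entry by entry,
 * preserving counters), or the target is empty (just swap the
 * radix trees).
 */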
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

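/*
 * Apply a new flag set to a table.  A table that is neither
 * referenced nor persistent loses PFR_TFLAG_ACTIVE, and a table
 * with no setmask flags left is removed from the tree and
 * destroyed outright.
 */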
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;
	void			*h4 = NULL, *h6 = NULL;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead(&h4, offsetof(struct sockaddr_in, sin_addr) * 8))
		goto out;

	if (!rn_inithead(&h6, offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		Free(h4);
		goto out;
	}
	kt->pfrkt_ip4 = h4;
	kt->pfrkt_ip6 = h6;
	kt->pfrkt_tzero = tzero;

	return (kt);
out:
	pfr_destroy_ktable(kt, 0);
	return (NULL);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((void *)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((void *)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

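/*
 * Fast-path lookup used when filtering packets: resolve the table
 * (falling back to the root table when this one is not active),
 * match the address in the radix tree and update the table's
 * match/nomatch counters.
 */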
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
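/*
 * Round-robin address selection for address pools backed by a
 * table: *pidx/counter track the current position, blocks are
 * visited in tree order via pfr_kentry_byidx(), and addresses that
 * fall into a nested (more specific) block are skipped past.  This
 * comment describes pfr_pool_get() below.
 */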
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = (void *)0xdeadb;
	struct pf_addr		*addr = (void *)0xdeadb;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
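		/* (continued in the loop below) */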
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

/*
 * Refresh a dynamic address ("table" type pfi_dynaddr): pick the
 * first matching entry of each family from the table and store its
 * address and mask, counting how many entries exist per family.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}