/*	$NetBSD: pf_table.c,v 1.12 2007/03/12 18:18:31 ad Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.62 2004/12/07 18:02:04 mcbride Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#ifdef __OpenBSD__
#include <netinet/ip_ipsp.h>
#endif
#include <net/pfvar.h>

#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
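/*
 * COPYIN/COPYOUT dispatch on PFR_FLAG_USERIOCTL: requests arriving via
 * ioctl carry user-space buffers and must go through copyin/copyout
 * (which may fail with EFAULT), while direct kernel callers get a plain
 * bcopy whose result is forced to 0, so both paths can be checked
 * uniformly, as in:
 *
 *	if (COPYIN(addr+i, &ad, sizeof(ad)))
 *		senderr(EFAULT);
 */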
#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?		\
    (struct pf_addr *)&(su)->sin.sin_addr :		\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
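/*
 * A pfr_walktree describes one traversal of a table's radix trees:
 * pfrw_op selects what pfr_walktree() does at each node (clear marks,
 * collect entries on a workq, copy addresses or statistics out, fetch
 * the n-th entry for address pools, or refresh a dynamic address), and
 * the union member carries that operation's cursor.
 */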
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
#ifdef __NetBSD__
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr, IPL_NONE);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr, IPL_NONE);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL, IPL_SOFTNET);
#else
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);
#endif

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

#ifdef _LKM
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
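/*
 * Add the given addresses to the table.  A scratch table (tmpkt)
 * shadows the insertions so duplicates within the request itself are
 * caught before the live table is touched; the collected entries are
 * only inserted for real (pfr_insert_kentries) after the whole batch
 * has been validated.
 */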
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xdel = 0, log = 1;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here.
	 * With:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
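	/*
	 * Worked example (hypothetical numbers): for N = 1000 entries the
	 * loop below yields log = 11 (it counts bit positions, starting
	 * from 1), so deleting more than 1000/11 = 90 addresses walks the
	 * table once instead of doing one radix lookup per address.
	 */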
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad)))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
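/*
 * Replace the table contents with the given address list: unlisted
 * entries are deleted, new ones added, and entries whose "not" flag
 * differs are put on a change queue.  With PFR_FLAG_FEEDBACK and a
 * nonzero *size2, the deleted addresses are reported back in the
 * slots following the input.
 */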
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0, xdel = 0,
				 xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad, 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
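/*
 * The get/list operations treat *size as in/out: when the table holds
 * more entries than the caller's buffer, only the required size is
 * written back and nothing is copied, letting the caller retry with a
 * larger buffer.
 */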
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0 /* XXX gcc */;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
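/*
 * Reject malformed addresses: the prefix length must fit the address
 * family, and no host bit may be set past the prefix.  For example,
 * 10.0.0.1/24 is invalid (host bits set beyond bit 24), while
 * 10.0.0.0/24 and 10.0.0.1/32 pass.
 */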
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((char *)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((char *)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
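/*
 * Look up an address in the table.  Network entries are found by an
 * exact rn_lookup on address plus mask; host addresses use rn_match,
 * and with 'exact' set a network entry returned for a host query is
 * rejected.
 */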
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = (void *)0xdeadb;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	 ad;
	int		 i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}
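/*
 * Build a netmask sockaddr for a prefix length.  Examples (hypothetical
 * values): af=AF_INET, net=25 gives 255.255.255.128; af=AF_INET6,
 * net=35 fills the first 32-bit word with ones and puts
 * htonl(0xE0000000) in the second.
 */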
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	 i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
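/*
 * Insert or remove a kentry in the table's per-family radix tree.
 * Network entries carry the mask built by pfr_prepare_network(); host
 * entries are added with a NULL mask.  rn_addroute() returning NULL
 * (duplicate key) is reported as -1.
 */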
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6,
			    ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
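/*
 * Create tables.  A table defined inside an anchor is also linked to a
 * root table of the same name in the main ruleset (creating it if
 * needed); pfr_match_addr falls back to that root while the anchored
 * table is inactive.
 */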
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0 /* XXX gcc */, n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0 /* XXX gcc */, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
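/*
 * The pfr_ina_* functions implement the inactive-set transaction used
 * during ruleset loads: pfr_ina_begin issues a ticket and clears stale
 * inactive tables, pfr_ina_define builds shadow tables under that
 * ticket, and pfr_ina_commit (or pfr_ina_rollback) atomically swaps
 * the shadows into place (or discards them).
 */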
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0 /* XXX gcc */, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
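/*
 * Merge a shadow table into its live counterpart.  Three cases: the
 * shadow carries no address list (NO_ADDRESSES), so only flags change;
 * the live table is active, so the two address sets are merged entry
 * by entry (mark, add, delete, invert); or the live table cannot
 * contain addresses, so the radix heads are simply swapped.
 */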
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}
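/*
 * Apply a new flag word to a table.  A table that is neither referenced
 * nor persistent loses PFR_TFLAG_ACTIVE; once no flag in SETMASK
 * remains, the table is unlinked and destroyed, dropping the anchor
 * reference on its root table.
 */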
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	 addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;
	void			*h4 = NULL, *h6 = NULL;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead(&h4, offsetof(struct sockaddr_in, sin_addr) * 8))
		goto out;

	if (!rn_inithead(&h6, offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		Free(h4);
		goto out;
	}
	kt->pfrkt_ip4 = h4;
	kt->pfrkt_ip6 = h6;
	kt->pfrkt_tzero = tzero;

	return (kt);
out:
	pfr_destroy_ktable(kt, 0);
	return (NULL);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((void *)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((void *)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}
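/*
 * Round-robin address selection for pools: *pidx indexes the current
 * block (table entry) and counter holds the last address handed out.
 * The inner loop skips nested sub-blocks so iteration stays within the
 * enclosing network without descending into more-specific entries.
 */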
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = (void *)0xdeadb;
	struct pf_addr		*addr = (void *)0xdeadb;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/*
		 * We need to increase the counter past the nested block;
		 * build the nested block's mask in the table's own
		 * address family.
		 */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	 w;
	int			 s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}