/*	$NetBSD: pf_ioctl.c,v 1.54 2018/07/11 11:13:16 kre Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.54 2018/07/11 11:13:16 kre Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include "pfsync.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/kauth.h>
#include <sys/module.h>
#include <sys/cprng.h>
#include <sys/device.h>
#endif /* __NetBSD__ */

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifndef __NetBSD__
#include <dev/rndvar.h>
#include <crypto/md5.h>
#else
#include <netinet/in_offload.h>
#include <sys/md5.h>
#endif /* __NetBSD__ */
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif /* NPFLOG > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

#include "ioconf.h"

#ifdef _MODULE
void			 pfdetach(void);
#endif /* _MODULE */
#ifndef __NetBSD__
void			 pf_thread_create(void *);
#endif /* !__NetBSD__ */
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, int, char *);
void			 pf_state_export(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);
void			 pf_state_import(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);

static int		 pf_state_add(struct pfsync_state *);

struct pf_rule		 pf_default_rule;
#ifdef __NetBSD__
krwlock_t		 pf_consistency_lock;
#else
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#endif /* __NetBSD__ */
#ifdef ALTQ
static int		 pf_altq_running;
#endif

int			 pf_state_lock = 0;

#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
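/*
 * Note: pf_tags and pf_qids are managed by the same allocator
 * (tagname2tag() below), so ALTQ queue names must obey the same
 * name-length limit as tag names; the #error above enforces that
 * at build time.
 */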
u_int16_t		 tagname2tag(struct pf_tags *, char *);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

#ifdef __NetBSD__
void			 pf_deferred_init(device_t);
#endif

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
	.d_open = pfopen,
	.d_close = pfclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = pfioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

static int pfil4_wrapper(void *, struct mbuf **, struct ifnet *, int);
#ifdef INET6
static int pfil6_wrapper(void *, struct mbuf **, struct ifnet *, int);
#endif /* INET6 */

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached;

static kauth_listener_t pf_listener;
#endif /* __NetBSD__ */

#ifdef __NetBSD__
static int
pf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)arg0;

	if (action != KAUTH_NETWORK_FIREWALL)
		return result;

	/* These must have come from device context. */
	if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
	    (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
		result = KAUTH_RESULT_ALLOW;

	return result;
}
#endif /* __NetBSD__ */

void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

#ifdef __NetBSD__
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL, IPL_SOFTNET);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL, IPL_SOFTNET);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL, IPL_SOFTNET);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */

	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

#ifdef __NetBSD__
	rw_init(&pf_consistency_lock);
#endif /* __NetBSD__ */

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

#ifdef __NetBSD__
	/*
	 * Defer rest of initialization until we can use cprng_fast32()
	 * which requires per-CPU data to have been initialized which
	 * in turn requires that all CPUs have been discovered and
	 * attached!
	 */
	config_interrupts(NULL, pf_deferred_init);
#else
	/* XXX do our best to avoid a conflict */
	pf_status.hostid = cprng_fast32();

	/* require process context to purge states, so perform in a thread */
	kthread_create_deferred(pf_thread_create, NULL);
#endif /* !__NetBSD__ */

#ifdef __NetBSD__
	pf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    pf_listener_cb, NULL);
#endif /* __NetBSD__ */
}

#ifdef __NetBSD__
/* ARGSUSED */
void
pf_deferred_init(device_t dev)
{

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = cprng_fast32();

	/* require process context to purge states, so perform in a thread */
	if (kthread_create(PRI_NONE, 0, NULL, pf_purge_thread, NULL, NULL,
	    "pfpurge"))
		panic("pfpurge thread");
}
#endif /* __NetBSD__ */

#ifdef _MODULE
void
pfdetach(void)
{
	extern int		 pf_purge_thread_running;
	extern int		 pf_purge_thread_stop;
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';

	pf_purge_thread_stop = 1;
	wakeup(pf_purge_thread);

	/* wait until the kthread exits */
	while (pf_purge_thread_running)
		tsleep(&pf_purge_thread_running, PWAIT, "pfdown", 0);

	(void)pf_pfil_detach();

	pf_status.running = 0;

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* ALTQ */

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC > 0
		state->sync_flags = PFSTATE_NOSYNC;
#endif /* NPFSYNC > 0 */
	}
	pf_purge_expired_states(pf_status.states);
#if NPFSYNC > 0
	pfsync_clear_states(pf_status.hostid, NULL);
#endif /* NPFSYNC > 0 */

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes(0);

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_key_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	rw_destroy(&pf_consistency_lock);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();

	/* cleanup kauth listener */
	kauth_unlisten_scope(pf_listener);
}
#endif /* _MODULE */

#ifndef __NetBSD__
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
#endif /* !__NetBSD__ */
int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
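/*
 * Usage sketch for pf_get_pool(), with hypothetical values: a caller
 * holding the current ticket of the active NAT ruleset can fetch the
 * address pool of rule number 3 with
 *
 *	pool = pf_get_pool(anchor, ticket, PF_NAT, 3, 0, 1, 1);
 *
 * i.e. r_last = 0, active = 1, check_ticket = 1.  A stale ticket makes
 * the lookup return NULL, which is how a concurrent ruleset change is
 * detected.
 */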
void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
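/*
 * Illustration of the first-free-slot policy above: if the list holds
 * tags 1, 2 and 4, a new name is assigned tag 3 and inserted before
 * the entry holding 4; if the list holds 1, 2 and 3, the new name is
 * appended at the tail with tag 4.  The list therefore stays sorted
 * by tag id without a separate bitmap of free ids.
 */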
void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif /* !__NetBSD__ */
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif /* !__NetBSD__ */
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif /* !__NetBSD__ */
}

#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
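/*
 * The altq list is updated transactionally: pf_begin_altq() empties
 * the inactive list and hands out a ticket, queues are then staged on
 * the inactive list (DIOCADDALTQ below), and pf_commit_altq() swaps
 * the staged list in.  Every step quotes the ticket, so a stale or
 * concurrent update is refused with EBUSY instead of corrupting the
 * active configuration.
 */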
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}

int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been
	 * overridden by a new one; in that case, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */

int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));	\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));	\
} while (0)
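/*
 * For reference, an invocation such as PF_MD5_UPD_HTONS(rule,
 * match_tag, x) expands to
 *
 *	do {
 *		(x) = htons((rule)->match_tag);
 *		MD5Update(ctx, (u_int8_t *) &(x), sizeof(u_int16_t));
 *	} while (0)
 *
 * i.e. multi-byte fields are hashed in network byte order, so the
 * resulting checksum is comparable between hosts of different
 * endianness.
 */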
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
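/*
 * pf_hash_rule() and pf_hash_rule_addr() feed every field that defines
 * a rule's behaviour into one MD5 context.  pf_setup_pfsync_matching()
 * below folds the per-rule hashes into pf_status.pf_chksum, which
 * gives pfsync peers a cheap way to check that they are running
 * equivalent main rulesets.
 */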
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	int secs = time_second;

	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;
}
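/*
 * Worked example for the expiry handling above: pf_state_expires()
 * returns an absolute time in seconds.  If a state expires at
 * secs + 40, sp->expire is exported as the relative value 40; a state
 * already past its expiry is exported as 0.  pf_state_import() below
 * performs the inverse translation when a state is injected.
 */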
void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->rule.ptr->states++;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = time_second;
	s->expire = time_second;
	s->timeout = sp->timeout;
	if (sp->expire > 0)
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}

int
pf_state_add(struct pfsync_state *sp)
{
	struct pf_state		*s;
	struct pf_state_key	*sk;
	struct pfi_kif		*kif;

	if (sp->timeout >= PFTM_MAX &&
	    sp->timeout != PFTM_UNTIL_PACKET) {
		return EINVAL;
	}
	s = pool_get(&pf_state_pl, PR_NOWAIT);
	if (s == NULL) {
		return ENOMEM;
	}
	bzero(s, sizeof(struct pf_state));
	if ((sk = pf_alloc_state_key(s)) == NULL) {
		pool_put(&pf_state_pl, s);
		return ENOMEM;
	}
	pf_state_import(sp, sk, s);
	kif = pfi_kif_get(sp->ifname);
	if (kif == NULL) {
		pool_put(&pf_state_pl, s);
		pool_put(&pf_state_key_pl, sk);
		return ENOENT;
	}
	if (pf_insert_state(kif, s)) {
		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
		pool_put(&pf_state_pl, s);
		return ENOMEM;
	}

	return 0;
}

int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(void *) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}
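/*
 * pfioctl() below is reached through the pf cdevsw entry above.  A
 * userland consumer (pfctl(8), for instance) drives it roughly like
 * this (a sketch only, error handling and headers omitted):
 *
 *	int dev = open("/dev/pf", O_RDWR);
 *	if (ioctl(dev, DIOCSTART) == -1)
 *		err(1, "DIOCSTART");
 *
 * Each command is vetted twice: once against the caller's credentials
 * (the kauth switch) and once against the FWRITE mode of the open
 * file, before the consistency lock is taken.
 */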
int
pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
		case DIOCSETLCK:
		case DIOCADDSTATES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETLCK:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
		case DIOCADDSTATES:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		rw_enter_write(&pf_consistency_lock);
	else
		rw_enter_read(&pf_consistency_lock);

	s = splsoftnet();
	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __NetBSD__
			error = pf_pfil_attach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 1;
			pf_status.since = time_second;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time_second;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
#ifdef __NetBSD__
			error = pf_pfil_detach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 0;
			pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;
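	/*
	 * DIOCADDRULE below stages the new rule on the ruleset's
	 * *inactive* list; nothing takes effect until the transaction
	 * is committed (pf_commit_rules() with the ticket obtained via
	 * pf_begin_rules()).  Both pr->ticket and pr->pool_ticket must
	 * be current, otherwise the request fails with EBUSY.
	 */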
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
#ifdef __NetBSD__
		rule->cuid = kauth_cred_getuid(l->l_cred);
		rule->cpid = l->l_proc->p_pid;
#else
		rule->cuid = p->p_cred->p_ruid;
		rule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

#ifndef __NetBSD__
		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
			error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
#if NPFLOG > 0
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
#endif
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		rule->overload_tbl = NULL;
		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}
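	/*
	 * Rules are listed with a two-step protocol: DIOCGETRULES
	 * (above) returns the rule count and a read ticket, then the
	 * caller issues DIOCGETRULE (below) once per index 0..nr-1,
	 * quoting that ticket, so the snapshot stays consistent across
	 * the individual calls.
	 */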
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
#ifdef __NetBSD__
			newrule->cuid = kauth_cred_getuid(l->l_cred);
			newrule->cpid = l->l_proc->p_pid;
#else
			newrule->cuid = p->p_cred->p_ruid;
			newrule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

#ifndef __NetBSD__
			if (newrule->rtableid > 0 &&
			    !rtable_exists(newrule->rtableid))
				error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if NPFLOG > 0
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}
		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}

	case DIOCCLRSTATES: {
		struct pf_state		*ps, *nexts;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (ps = RB_MIN(pf_state_tree_id, &tree_id); ps; ps = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, ps);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    ps->kif->pfik_name)) {
#if NPFSYNC
				/* don't send out individual delete messages */
				ps->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(ps);
				killed++;
			}
		}
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*ps, *nexts;
		struct pf_state_key	*sk;
		struct pf_state_host	*src, *dst;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (ps = RB_MIN(pf_state_tree_id, &tree_id); ps;
		    ps = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, ps);
			sk = ps->state_key;

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    src->port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dst->port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    ps->kif->pfik_name))) {
#if NPFSYNC > 0
				/* send immediate delete of state */
				pfsync_delete_state(ps);
				ps->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(ps);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}
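	/*
	 * Sketch of DIOCKILLSTATES matching, with hypothetical values:
	 * psk_af = AF_INET, psk_proto = 0 (any protocol), psk_src
	 * covering 10.0.0.0/8 and an empty psk_ifname unlinks every
	 * state whose source address falls within 10/8; the number of
	 * killed states is passed back to userland in psk_af.
	 */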
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = (struct pfsync_state *)&ps->state;

		error = pf_state_add(sp);
		break;
	}

	case DIOCADDSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pfsync_state	*p = (struct pfsync_state *)ps->ps_states;
		struct pfsync_state	*pk;
		int			 size = ps->ps_len;
		int			 i = 0;

		error = 0;
		pk = malloc(sizeof(*pk), M_TEMP, M_WAITOK);

		while (error == 0 && i < size) {
			if (copyin(p, pk, sizeof(struct pfsync_state))) {
				/* fall out of the loop; pk is freed below */
				error = EFAULT;
			} else {
				error = pf_state_add(pk);
				i += sizeof(*p);
				p++;
			}
		}

		free(pk, M_TEMP);
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*pfs;
		u_int32_t		 nr;

		nr = 0;
		RB_FOREACH(pfs, pf_state_tree_id, &tree_id) {
			if (nr >= ps->nr)
				break;
			nr++;
		}
		if (pfs == NULL) {
			error = EBUSY;
			break;
		}

		pf_state_export((struct pfsync_state *)&ps->state,
		    pfs->state_key, pfs);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP);
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP);
		break;
	}
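	/*
	 * DIOCGETSTATES above follows the usual sizing convention:
	 * calling it with ps_len == 0 only reports the space required;
	 * the caller then allocates a buffer, points ps_states at it
	 * and calls again, and ps_len is trimmed to the number of
	 * states actually copied out.
	 */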
case DIOCNATLOOK: {
	struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
	struct pf_state_key *sk;
	struct pf_state *state;
	struct pf_state_key_cmp key;
	int m = 0, direction = pnl->direction;

	key.af = pnl->af;
	key.proto = pnl->proto;

	if (!pnl->proto ||
	    PF_AZERO(&pnl->saddr, pnl->af) ||
	    PF_AZERO(&pnl->daddr, pnl->af) ||
	    ((pnl->proto == IPPROTO_TCP ||
	    pnl->proto == IPPROTO_UDP) &&
	    (!pnl->dport || !pnl->sport)))
		error = EINVAL;
	else {
		/*
		 * userland gives us source and dest of connection,
		 * reverse the lookup so we ask for what happens with
		 * the return traffic, enabling us to find it in the
		 * state tree.
		 */
		if (direction == PF_IN) {
			PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
			key.ext.port = pnl->dport;
			PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
			key.gwy.port = pnl->sport;
			state = pf_find_state_all(&key, PF_EXT_GWY, &m);
		} else {
			PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
			key.lan.port = pnl->dport;
			PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
			key.ext.port = pnl->sport;
			state = pf_find_state_all(&key, PF_LAN_EXT, &m);
		}
		if (m > 1)
			error = E2BIG;	/* more than one state */
		else if (state != NULL) {
			sk = state->state_key;
			if (direction == PF_IN) {
				PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
				    sk->af);
				pnl->rsport = sk->lan.port;
				PF_ACPY(&pnl->rdaddr, &pnl->daddr,
				    pnl->af);
				pnl->rdport = pnl->dport;
			} else {
				PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
				    sk->af);
				pnl->rdport = sk->gwy.port;
				PF_ACPY(&pnl->rsaddr, &pnl->saddr,
				    pnl->af);
				pnl->rsport = pnl->sport;
			}
		} else
			error = ENOENT;
	}
	break;
}

case DIOCSETTIMEOUT: {
	struct pfioc_tm *pt = (struct pfioc_tm *)addr;
	int old;

	if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
	    pt->seconds < 0) {
		error = EINVAL;
		goto fail;
	}
	old = pf_default_rule.timeout[pt->timeout];
	if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
		pt->seconds = 1;
	pf_default_rule.timeout[pt->timeout] = pt->seconds;
	if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
		wakeup(pf_purge_thread);
	pt->seconds = old;
	break;
}

case DIOCGETTIMEOUT: {
	struct pfioc_tm *pt = (struct pfioc_tm *)addr;

	if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
		error = EINVAL;
		goto fail;
	}
	pt->seconds = pf_default_rule.timeout[pt->timeout];
	break;
}

case DIOCGETLIMIT: {
	struct pfioc_limit *pl = (struct pfioc_limit *)addr;

	if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
		error = EINVAL;
		goto fail;
	}
	pl->limit = pf_pool_limits[pl->index].limit;
	break;
}

case DIOCSETLIMIT: {
	struct pfioc_limit *pl = (struct pfioc_limit *)addr;
	int old_limit;

	if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
	    pf_pool_limits[pl->index].pp == NULL) {
		error = EINVAL;
		goto fail;
	}
#ifdef __NetBSD__
	pool_sethardlimit(pf_pool_limits[pl->index].pp,
	    pl->limit, NULL, 0);
#else
	if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
	    pl->limit, NULL, 0) != 0) {
		error = EBUSY;
		goto fail;
	}
#endif /* !__NetBSD__ */
	old_limit = pf_pool_limits[pl->index].limit;
	pf_pool_limits[pl->index].limit = pl->limit;
	pl->limit = old_limit;
	break;
}

case DIOCSETDEBUG: {
	u_int32_t *level = (u_int32_t *)addr;

	pf_status.debug = *level;
	break;
}

case DIOCCLRRULECTRS: {
	/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
	struct pf_ruleset *ruleset = &pf_main_ruleset;
	struct pf_rule *rule;

	TAILQ_FOREACH(rule,
	    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
		rule->evaluations = 0;
		rule->packets[0] = rule->packets[1] = 0;
		rule->bytes[0] = rule->bytes[1] = 0;
	}
	break;
}
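/*
 * The ALTQ ioctls below operate on the active queue list.  Starting
 * and stopping only touches interface-level entries (qname[0] == 0);
 * per-queue entries share the discipline of their interface entry,
 * which DIOCADDALTQ copies over when a queue is added.
 */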
#ifdef ALTQ
case DIOCSTARTALTQ: {
	struct pf_altq *altq;

	/* enable all altq interfaces on active list */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			error = pf_enable_altq(altq);
			if (error != 0)
				break;
		}
	}
	if (error == 0)
		pf_altq_running = 1;
	DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
	break;
}

case DIOCSTOPALTQ: {
	struct pf_altq *altq;

	/* disable all altq interfaces on active list */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			error = pf_disable_altq(altq);
			if (error != 0)
				break;
		}
	}
	if (error == 0)
		pf_altq_running = 0;
	DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
	break;
}

case DIOCADDALTQ: {
	struct pfioc_altq *paa = (struct pfioc_altq *)addr;
	struct pf_altq *altq, *a;

	if (paa->ticket != ticket_altqs_inactive) {
		error = EBUSY;
		break;
	}
	altq = pool_get(&pf_altq_pl, PR_NOWAIT);
	if (altq == NULL) {
		error = ENOMEM;
		break;
	}
	bcopy(&paa->altq, altq, sizeof(struct pf_altq));

	/*
	 * if this is for a queue, find the discipline and
	 * copy the necessary fields
	 */
	if (altq->qname[0] != 0) {
		if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
			error = EBUSY;
			pool_put(&pf_altq_pl, altq);
			break;
		}
		TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
			if (strncmp(a->ifname, altq->ifname,
			    IFNAMSIZ) == 0 && a->qname[0] == 0) {
				altq->altq_disc = a->altq_disc;
				break;
			}
		}
	}

	error = altq_add(altq);
	if (error) {
		pool_put(&pf_altq_pl, altq);
		break;
	}

	TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
	bcopy(altq, &paa->altq, sizeof(struct pf_altq));
	break;
}

case DIOCGETALTQS: {
	struct pfioc_altq *paa = (struct pfioc_altq *)addr;
	struct pf_altq *altq;

	paa->nr = 0;
	TAILQ_FOREACH(altq, pf_altqs_active, entries)
		paa->nr++;
	paa->ticket = ticket_altqs_active;
	break;
}

case DIOCGETALTQ: {
	struct pfioc_altq *paa = (struct pfioc_altq *)addr;
	struct pf_altq *altq;
	u_int32_t nr;

	if (paa->ticket != ticket_altqs_active) {
		error = EBUSY;
		break;
	}
	nr = 0;
	altq = TAILQ_FIRST(pf_altqs_active);
	while ((altq != NULL) && (nr < paa->nr)) {
		altq = TAILQ_NEXT(altq, entries);
		nr++;
	}
	if (altq == NULL) {
		error = EBUSY;
		break;
	}
	bcopy(altq, &paa->altq, sizeof(struct pf_altq));
	break;
}
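/*
 * The ticket checks above and below reject callers whose view of the
 * queue list is stale: userland must re-fetch DIOCGETALTQS (and its
 * ticket) before walking individual queues or their stats, otherwise
 * it gets EBUSY.
 */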
case DIOCCHANGEALTQ:
	/* CHANGEALTQ not supported yet! */
	error = ENODEV;
	break;

case DIOCGETQSTATS: {
	struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
	struct pf_altq *altq;
	u_int32_t nr;
	int nbytes;

	if (pq->ticket != ticket_altqs_active) {
		error = EBUSY;
		break;
	}
	nbytes = pq->nbytes;
	nr = 0;
	altq = TAILQ_FIRST(pf_altqs_active);
	while ((altq != NULL) && (nr < pq->nr)) {
		altq = TAILQ_NEXT(altq, entries);
		nr++;
	}
	if (altq == NULL) {
		error = EBUSY;
		break;
	}
	error = altq_getqstats(altq, pq->buf, &nbytes);
	if (error == 0) {
		pq->scheduler = altq->scheduler;
		pq->nbytes = nbytes;
	}
	break;
}
#endif /* ALTQ */

case DIOCBEGINADDRS: {
	struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

	pf_empty_pool(&pf_pabuf);
	pp->ticket = ++ticket_pabuf;
	break;
}

case DIOCADDADDR: {
	struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

	if (pp->ticket != ticket_pabuf) {
		error = EBUSY;
		break;
	}
#ifndef INET
	if (pp->af == AF_INET) {
		error = EAFNOSUPPORT;
		break;
	}
#endif /* INET */
#ifndef INET6
	if (pp->af == AF_INET6) {
		error = EAFNOSUPPORT;
		break;
	}
#endif /* INET6 */
	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
	    pp->addr.addr.type != PF_ADDR_TABLE) {
		error = EINVAL;
		break;
	}
	pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
	if (pa == NULL) {
		error = ENOMEM;
		break;
	}
	bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
	if (pa->ifname[0]) {
		pa->kif = pfi_kif_get(pa->ifname);
		if (pa->kif == NULL) {
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
	}
	if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
		pfi_dynaddr_remove(&pa->addr);
		pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
		pool_put(&pf_pooladdr_pl, pa);
		error = EINVAL;
		break;
	}
	TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
	break;
}

case DIOCGETADDRS: {
	struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

	pp->nr = 0;
	pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
	    pp->r_num, 0, 1, 0);
	if (pool == NULL) {
		error = EBUSY;
		break;
	}
	TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
	break;
}

case DIOCGETADDR: {
	struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
	u_int32_t nr = 0;

	pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
	    pp->r_num, 0, 1, 1);
	if (pool == NULL) {
		error = EBUSY;
		break;
	}
	pa = TAILQ_FIRST(&pool->list);
	while ((pa != NULL) && (nr < pp->nr)) {
		pa = TAILQ_NEXT(pa, entries);
		nr++;
	}
	if (pa == NULL) {
		error = EBUSY;
		break;
	}
	bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
	pfi_dynaddr_copyout(&pp->addr.addr);
	pf_tbladdr_copyout(&pp->addr.addr);
	pf_rtlabel_copyout(&pp->addr.addr);
	break;
}
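/*
 * DIOCCHANGEADDR edits a rule's address pool in place: an entry can
 * be inserted at the head or tail, before or after the entry at index
 * pca->nr, or removed.  Afterwards the pool's round-robin cursor is
 * reset to the head of the list.
 */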
case DIOCCHANGEADDR: {
	struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
	struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
	struct pf_ruleset *ruleset;

	if (pca->action < PF_CHANGE_ADD_HEAD ||
	    pca->action > PF_CHANGE_REMOVE) {
		error = EINVAL;
		break;
	}
	if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
	    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
	    pca->addr.addr.type != PF_ADDR_TABLE) {
		error = EINVAL;
		break;
	}

	ruleset = pf_find_ruleset(pca->anchor);
	if (ruleset == NULL) {
		error = EBUSY;
		break;
	}
	pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
	    pca->r_num, pca->r_last, 1, 1);
	if (pool == NULL) {
		error = EBUSY;
		break;
	}
	if (pca->action != PF_CHANGE_REMOVE) {
		newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
		if (newpa == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
#ifndef INET
		if (pca->af == AF_INET) {
			pool_put(&pf_pooladdr_pl, newpa);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pca->af == AF_INET6) {
			pool_put(&pf_pooladdr_pl, newpa);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (newpa->ifname[0]) {
			newpa->kif = pfi_kif_get(newpa->ifname);
			if (newpa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
		} else
			newpa->kif = NULL;
		if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
		    pf_tbladdr_setup(ruleset, &newpa->addr)) {
			pfi_dynaddr_remove(&newpa->addr);
			pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, newpa);
			error = EINVAL;
			break;
		}
	}

	if (pca->action == PF_CHANGE_ADD_HEAD)
		oldpa = TAILQ_FIRST(&pool->list);
	else if (pca->action == PF_CHANGE_ADD_TAIL)
		oldpa = TAILQ_LAST(&pool->list, pf_palist);
	else {
		int i = 0;

		oldpa = TAILQ_FIRST(&pool->list);
		while ((oldpa != NULL) && (i < pca->nr)) {
			oldpa = TAILQ_NEXT(oldpa, entries);
			i++;
		}
		if (oldpa == NULL) {
			error = EINVAL;
			break;
		}
	}

	if (pca->action == PF_CHANGE_REMOVE) {
		TAILQ_REMOVE(&pool->list, oldpa, entries);
		pfi_dynaddr_remove(&oldpa->addr);
		pf_tbladdr_remove(&oldpa->addr);
		pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
		pool_put(&pf_pooladdr_pl, oldpa);
	} else {
		if (oldpa == NULL)
			TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
		else if (pca->action == PF_CHANGE_ADD_HEAD ||
		    pca->action == PF_CHANGE_ADD_BEFORE)
			TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
		else
			TAILQ_INSERT_AFTER(&pool->list, oldpa,
			    newpa, entries);
	}

	pool->cur = TAILQ_FIRST(&pool->list);
	PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
	    pca->af);
	break;
}

case DIOCGETRULESETS: {
	struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
	struct pf_ruleset *ruleset;
	struct pf_anchor *anchor;

	pr->path[sizeof(pr->path) - 1] = 0;
	if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
		error = EINVAL;
		break;
	}
	pr->nr = 0;
	if (ruleset->anchor == NULL) {
		/* XXX kludge for pf_main_ruleset */
		RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL)
				pr->nr++;
	} else {
		RB_FOREACH(anchor, pf_anchor_node,
		    &ruleset->anchor->children)
			pr->nr++;
	}
	break;
}
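/*
 * Anchors are enumerated in two steps: DIOCGETRULESETS above returns
 * the number of children under a path, then DIOCGETRULESET below is
 * called with pr->nr set to 0..nr-1 to fetch each child's name.
 */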
case DIOCGETRULESET: {
	struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
	struct pf_ruleset *ruleset;
	struct pf_anchor *anchor;
	u_int32_t nr = 0;

	pr->path[sizeof(pr->path) - 1] = 0;
	if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
		error = EINVAL;
		break;
	}
	pr->name[0] = 0;
	if (ruleset->anchor == NULL) {
		/* XXX kludge for pf_main_ruleset */
		RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
	} else {
		RB_FOREACH(anchor, pf_anchor_node,
		    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
	}
	if (!pr->name[0])
		error = EBUSY;
	break;
}

case DIOCRCLRTABLES: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != 0) {
		error = ENODEV;
		break;
	}
	error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
	    io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRADDTABLES: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_table)) {
		error = ENODEV;
		break;
	}
	error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
	    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRDELTABLES: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_table)) {
		error = ENODEV;
		break;
	}
	error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
	    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRGETTABLES: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_table)) {
		error = ENODEV;
		break;
	}
	error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
	    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRGETTSTATS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
		error = ENODEV;
		break;
	}
	error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
	    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRCLRTSTATS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_table)) {
		error = ENODEV;
		break;
	}
	error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
	    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRSETTFLAGS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_table)) {
		error = ENODEV;
		break;
	}
	error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
	    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
	    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRCLRADDRS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != 0) {
		error = ENODEV;
		break;
	}
	error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
	    io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRADDADDRS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_addr)) {
		error = ENODEV;
		break;
	}
	error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
	    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
	    PFR_FLAG_USERIOCTL);
	break;
}
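/*
 * The remaining table ioctls manipulate addresses within a single
 * table.  Note the pfrio_esize check in each handler: the element
 * size the caller was compiled against must match the kernel's,
 * otherwise ENODEV is returned and nothing is copied.  DIOCRSETADDRS
 * replaces a table's contents wholesale, reporting how many entries
 * were added, deleted and changed.
 */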
case DIOCRDELADDRS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_addr)) {
		error = ENODEV;
		break;
	}
	error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
	    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
	    PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRSETADDRS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_addr)) {
		error = ENODEV;
		break;
	}
	error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
	    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
	    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
	    PFR_FLAG_USERIOCTL, 0);
	break;
}

case DIOCRGETADDRS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_addr)) {
		error = ENODEV;
		break;
	}
	error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
	    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRGETASTATS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_astats)) {
		error = ENODEV;
		break;
	}
	error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
	    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRCLRASTATS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_addr)) {
		error = ENODEV;
		break;
	}
	error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
	    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
	    PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRTSTADDRS: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_addr)) {
		error = ENODEV;
		break;
	}
	error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
	    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
	    PFR_FLAG_USERIOCTL);
	break;
}

case DIOCRINADEFINE: {
	struct pfioc_table *io = (struct pfioc_table *)addr;

	if (io->pfrio_esize != sizeof(struct pfr_addr)) {
		error = ENODEV;
		break;
	}
	error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
	    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
	    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
	break;
}

case DIOCOSFPADD: {
	struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
	error = pf_osfp_add(io);
	break;
}

case DIOCOSFPGET: {
	struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
	error = pf_osfp_get(io);
	break;
}
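/*
 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement the transaction
 * interface pfctl uses to replace rulesets atomically: begin hands
 * out one ticket per ruleset/table element, the new configuration is
 * loaded against the inactive side, and commit flips everything over
 * in one step (rollback discards the inactive side instead).
 */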
case DIOCXBEGIN: {
	struct pfioc_trans *io = (struct pfioc_trans *)addr;
	struct pfioc_trans_e *ioe;
	struct pfr_table *table;
	int i;

	if (io->esize != sizeof(*ioe)) {
		error = ENODEV;
		goto fail;
	}
	ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
	    M_TEMP, M_WAITOK);
	table = (struct pfr_table *)malloc(sizeof(*table),
	    M_TEMP, M_WAITOK);
	for (i = 0; i < io->size; i++) {
		if (copyin(io->array+i, ioe, sizeof(*ioe))) {
			free(table, M_TEMP);
			free(ioe, M_TEMP);
			error = EFAULT;
			goto fail;
		}
		switch (ioe->rs_num) {
#ifdef ALTQ
		case PF_RULESET_ALTQ:
			if (ioe->anchor[0]) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EINVAL;
				goto fail;
			}
			if ((error = pf_begin_altq(&ioe->ticket))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail;
			}
			break;
#endif /* ALTQ */
		case PF_RULESET_TABLE:
			bzero(table, sizeof(*table));
			strlcpy(table->pfrt_anchor, ioe->anchor,
			    sizeof(table->pfrt_anchor));
			if ((error = pfr_ina_begin(table,
			    &ioe->ticket, NULL, 0))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail;
			}
			break;
		default:
			if ((error = pf_begin_rules(&ioe->ticket,
			    ioe->rs_num, ioe->anchor))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail;
			}
			break;
		}
		if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
			free(table, M_TEMP);
			free(ioe, M_TEMP);
			error = EFAULT;
			goto fail;
		}
	}
	free(table, M_TEMP);
	free(ioe, M_TEMP);
	break;
}

case DIOCXROLLBACK: {
	struct pfioc_trans *io = (struct pfioc_trans *)addr;
	struct pfioc_trans_e *ioe;
	struct pfr_table *table;
	int i;

	if (io->esize != sizeof(*ioe)) {
		error = ENODEV;
		goto fail;
	}
	ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
	    M_TEMP, M_WAITOK);
	table = (struct pfr_table *)malloc(sizeof(*table),
	    M_TEMP, M_WAITOK);
	for (i = 0; i < io->size; i++) {
		if (copyin(io->array+i, ioe, sizeof(*ioe))) {
			free(table, M_TEMP);
			free(ioe, M_TEMP);
			error = EFAULT;
			goto fail;
		}
		switch (ioe->rs_num) {
#ifdef ALTQ
		case PF_RULESET_ALTQ:
			if (ioe->anchor[0]) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EINVAL;
				goto fail;
			}
			if ((error = pf_rollback_altq(ioe->ticket))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail; /* really bad */
			}
			break;
#endif /* ALTQ */
		case PF_RULESET_TABLE:
			bzero(table, sizeof(*table));
			strlcpy(table->pfrt_anchor, ioe->anchor,
			    sizeof(table->pfrt_anchor));
			if ((error = pfr_ina_rollback(table,
			    ioe->ticket, NULL, 0))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail; /* really bad */
			}
			break;
		default:
			if ((error = pf_rollback_rules(ioe->ticket,
			    ioe->rs_num, ioe->anchor))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail; /* really bad */
			}
			break;
		}
	}
	free(table, M_TEMP);
	free(ioe, M_TEMP);
	break;
}
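/*
 * The commit below runs in two passes: the first only verifies that
 * every ticket is still valid, so that the second pass, which
 * actually flips the rulesets, should not be able to fail halfway
 * through and leave a partially committed configuration behind.
 */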
case DIOCXCOMMIT: {
	struct pfioc_trans *io = (struct pfioc_trans *)addr;
	struct pfioc_trans_e *ioe;
	struct pfr_table *table;
	struct pf_ruleset *rs;
	int i;

	if (io->esize != sizeof(*ioe)) {
		error = ENODEV;
		goto fail;
	}
	ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
	    M_TEMP, M_WAITOK);
	table = (struct pfr_table *)malloc(sizeof(*table),
	    M_TEMP, M_WAITOK);
	/* first makes sure everything will succeed */
	for (i = 0; i < io->size; i++) {
		if (copyin(io->array+i, ioe, sizeof(*ioe))) {
			free(table, M_TEMP);
			free(ioe, M_TEMP);
			error = EFAULT;
			goto fail;
		}
		switch (ioe->rs_num) {
#ifdef ALTQ
		case PF_RULESET_ALTQ:
			if (ioe->anchor[0]) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EINVAL;
				goto fail;
			}
			if (!altqs_inactive_open || ioe->ticket !=
			    ticket_altqs_inactive) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EBUSY;
				goto fail;
			}
			break;
#endif /* ALTQ */
		case PF_RULESET_TABLE:
			rs = pf_find_ruleset(ioe->anchor);
			if (rs == NULL || !rs->topen || ioe->ticket !=
			    rs->tticket) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EBUSY;
				goto fail;
			}
			break;
		default:
			if (ioe->rs_num < 0 || ioe->rs_num >=
			    PF_RULESET_MAX) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EINVAL;
				goto fail;
			}
			rs = pf_find_ruleset(ioe->anchor);
			if (rs == NULL ||
			    !rs->rules[ioe->rs_num].inactive.open ||
			    rs->rules[ioe->rs_num].inactive.ticket !=
			    ioe->ticket) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EBUSY;
				goto fail;
			}
			break;
		}
	}
	/* now do the commit - no errors should happen here */
	for (i = 0; i < io->size; i++) {
		if (copyin(io->array+i, ioe, sizeof(*ioe))) {
			free(table, M_TEMP);
			free(ioe, M_TEMP);
			error = EFAULT;
			goto fail;
		}
		switch (ioe->rs_num) {
#ifdef ALTQ
		case PF_RULESET_ALTQ:
			if ((error = pf_commit_altq(ioe->ticket))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail; /* really bad */
			}
			break;
#endif /* ALTQ */
		case PF_RULESET_TABLE:
			bzero(table, sizeof(*table));
			strlcpy(table->pfrt_anchor, ioe->anchor,
			    sizeof(table->pfrt_anchor));
			if ((error = pfr_ina_commit(table, ioe->ticket,
			    NULL, NULL, 0))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail; /* really bad */
			}
			break;
		default:
			if ((error = pf_commit_rules(ioe->ticket,
			    ioe->rs_num, ioe->anchor))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				goto fail; /* really bad */
			}
			break;
		}
	}
	free(table, M_TEMP);
	free(ioe, M_TEMP);
	break;
}

case DIOCGETSRCNODES: {
	struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
	struct pf_src_node *n, *p, *pstore;
	u_int32_t nr = 0;
	int space = psn->psn_len;

	if (space == 0) {
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		break;
	}

	pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

	p = psn->psn_src_nodes;
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
		int secs = time_second, diff;

		if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
			break;

		bcopy(n, pstore, sizeof(*pstore));
		if (n->rule.ptr != NULL)
			pstore->rule.nr = n->rule.ptr->nr;
		pstore->creation = secs - pstore->creation;
		if (pstore->expire > secs)
			pstore->expire -= secs;
		else
			pstore->expire = 0;

		/* adjust the connection rate estimate */
		diff = secs - n->conn_rate.last;
		if (diff >= n->conn_rate.seconds)
			pstore->conn_rate.count = 0;
		else
			pstore->conn_rate.count -=
			    n->conn_rate.count * diff /
			    n->conn_rate.seconds;

		error = copyout(pstore, p, sizeof(*p));
		if (error) {
			free(pstore, M_TEMP);
			goto fail;
		}
		p++;
		nr++;
	}
	psn->psn_len = sizeof(struct pf_src_node) * nr;

	free(pstore, M_TEMP);
	break;
}
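/*
 * Source nodes cannot simply be freed while states still point at
 * them, so the two ioctls below first detach every referencing state,
 * then mark the nodes as expired and let the purge logic reap them.
 */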
case DIOCCLRSRCNODES: {
	struct pf_src_node *n;
	struct pf_state *state;

	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
		n->expire = 1;
		n->states = 0;
	}
	pf_purge_expired_src_nodes(1);
	pf_status.src_nodes = 0;
	break;
}

case DIOCKILLSRCNODES: {
	struct pf_src_node *sn;
	struct pf_state *ps;
	struct pfioc_src_node_kill *psnk =
	    (struct pfioc_src_node_kill *)addr;
	int killed = 0;

	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
		if (PF_MATCHA(psnk->psnk_src.neg,
		    &psnk->psnk_src.addr.v.a.addr,
		    &psnk->psnk_src.addr.v.a.mask,
		    &sn->addr, sn->af) &&
		    PF_MATCHA(psnk->psnk_dst.neg,
		    &psnk->psnk_dst.addr.v.a.addr,
		    &psnk->psnk_dst.addr.v.a.mask,
		    &sn->raddr, sn->af)) {
			/* Handle state to src_node linkage */
			if (sn->states != 0) {
				RB_FOREACH(ps, pf_state_tree_id,
				    &tree_id) {
					if (ps->src_node == sn)
						ps->src_node = NULL;
					if (ps->nat_src_node == sn)
						ps->nat_src_node = NULL;
				}
				sn->states = 0;
			}
			sn->expire = 1;
			killed++;
		}
	}

	if (killed > 0)
		pf_purge_expired_src_nodes(1);

	psnk->psnk_af = killed;
	break;
}

case DIOCSETHOSTID: {
	u_int32_t *hid = (u_int32_t *)addr;

	if (*hid == 0)
		pf_status.hostid = cprng_fast32();
	else
		pf_status.hostid = *hid;
	break;
}

case DIOCOSFPFLUSH:
	pf_osfp_flush();
	break;

case DIOCIGETIFACES: {
	struct pfioc_iface *io = (struct pfioc_iface *)addr;

	if (io->pfiio_esize != sizeof(struct pfi_kif)) {
		error = ENODEV;
		break;
	}
	error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
	    &io->pfiio_size);
	break;
}

case DIOCSETIFFLAG: {
	struct pfioc_iface *io = (struct pfioc_iface *)addr;

	error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
	break;
}

case DIOCCLRIFFLAG: {
	struct pfioc_iface *io = (struct pfioc_iface *)addr;

	error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
	break;
}

case DIOCSETLCK: {
	pf_state_lock = *(uint32_t *)addr;
	break;
}

default:
	error = ENODEV;
	break;
}
fail:
	splx(s);
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
	return (error);
}
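/*
 * NetBSD attaches pf to the network stack through pfil(9) hooks.  The
 * wrappers below adapt pf_test()/pf_test6() to the pfil calling
 * convention, making mbufs writable and undeferring checksums before
 * pf inspects the packet.
 */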
#ifdef __NetBSD__
#ifdef INET
static int
pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * ensure that mbufs are writable beforehand
	 * as it's assumed by pf code.
	 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_undefer_cksum_tcpudp(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}
	}

	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
	    != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	}

	/*
	 * we're not compatible with fast-forward.
	 */
	if (dir == PFIL_IN && *mp) {
		(*mp)->m_flags &= ~M_CANFASTFWD;
	}

	return (0);
}
#endif /* INET */

#ifdef INET6
static int
pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * ensure that mbufs are writable beforehand
	 * as it's assumed by pf code.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
		}
	}

	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
	    != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	}

	return (0);
}
#endif /* INET6 */

static int
pf_pfil_attach(void)
{
	pfil_head_t *ph_inet;
#ifdef INET6
	pfil_head_t *ph_inet6;
#endif /* INET6 */
	int error;

	if (pf_pfil_attached)
		return (EBUSY);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		return (error);

#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad;
#endif /* INET6 */

	pf_pfil_attached = 1;

	return (0);

#ifdef INET6
bad:
	pfil_remove_hook((void *)pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT,
	    ph_inet);
#endif /* INET6 */

	return (error);
}

static int
pf_pfil_detach(void)
{
	pfil_head_t *ph_inet;
#ifdef INET6
	pfil_head_t *ph_inet6;
#endif /* INET6 */

	if (pf_pfil_attached == 0)
		return (EBUSY);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif /* INET6 */
	pf_pfil_attached = 0;

	return (0);
}
#endif /* __NetBSD__ */
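/*
 * When built as a kernel module, pf registers its device node and
 * attaches itself (plus pflog) at load time; the "bpf" string in
 * MODULE() declares a dependency on the bpf module, presumably for
 * pflog's packet taps.  Unloading is refused while pf is running.
 */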
#if defined(__NetBSD__)
MODULE(MODULE_CLASS_DRIVER, pf, "bpf");

static int
pf_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	extern void pflogattach(int);
	extern void pflogdetach(void);

	devmajor_t cmajor = NODEVMAJOR, bmajor = NODEVMAJOR;
	int err;

	switch (cmd) {
	case MODULE_CMD_INIT:
		err = devsw_attach("pf", NULL, &bmajor, &pf_cdevsw, &cmajor);
		if (err)
			return err;
		pfattach(1);
		pflogattach(1);
		return 0;
	case MODULE_CMD_FINI:
		if (pf_status.running) {
			return EBUSY;
		} else {
			pfdetach();
			pflogdetach();
			return devsw_detach(NULL, &pf_cdevsw);
		}
	default:
		return ENOTTY;
	}
#else
	if (cmd == MODULE_CMD_INIT)
		return 0;
	return ENOTTY;
#endif
}
#endif