/*	$NetBSD: pf_ioctl.c,v 1.41 2010/04/13 13:08:16 ahoka Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.41 2010/04/13 13:08:16 ahoka Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_pfil_hooks.h"
#endif

#include "pfsync.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/kauth.h>
#include <sys/module.h>
#endif /* __NetBSD__ */

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifndef __NetBSD__
#include <dev/rndvar.h>
#include <crypto/md5.h>
#else
#include <sys/md5.h>
#endif /* __NetBSD__ */
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif /* NPFLOG > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

void			 pfattach(int);
#ifdef _MODULE
void			 pfdetach(void);
#endif /* _MODULE */
#ifndef __NetBSD__
void			 pf_thread_create(void *);
#endif /* !__NetBSD__ */
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, int, char *);
void			 pf_state_export(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);
void			 pf_state_import(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);

struct pf_rule		 pf_default_rule;
#ifdef __NetBSD__
krwlock_t		 pf_consistency_lock;
#else
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#endif /* __NetBSD__ */
#ifdef ALTQ
static int		 pf_altq_running;
#endif

#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
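/*
 * Rule tags and (with ALTQ) queue names are mapped to 16-bit ids and
 * reference counted by the helpers below; see tagname2tag() for the
 * id allocation strategy.
 */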
u_int16_t		 tagname2tag(struct pf_tags *, char *);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};

static int pfil4_wrapper(void *, struct mbuf **, struct ifnet *, int);
#ifdef INET6
static int pfil6_wrapper(void *, struct mbuf **, struct ifnet *, int);
#endif /* INET6 */

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached;

static kauth_listener_t pf_listener;
#endif /* __NetBSD__ */

#ifdef __NetBSD__
static int
pf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)arg0;

	if (action != KAUTH_NETWORK_FIREWALL)
		return result;

	/* These must have come from device context. */
	if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
	    (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
		result = KAUTH_RESULT_ALLOW;

	return result;
}
#endif /* __NetBSD__ */
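/*
 * pfattach() performs one-time setup: the memory pools, the default
 * limits and timeouts, the main ruleset, and the state purge thread.
 */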
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

#ifdef __NetBSD__
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL, IPL_SOFTNET);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL, IPL_SOFTNET);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL, IPL_SOFTNET);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */

	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

#ifdef __NetBSD__
	rw_init(&pf_consistency_lock);
#endif /* __NetBSD__ */

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
#ifdef __NetBSD__
	if (kthread_create(PRI_NONE, 0, NULL, pf_purge_thread, NULL, NULL,
	    "pfpurge"))
		panic("pfpurge thread");
#else
	kthread_create_deferred(pf_thread_create, NULL);
#endif /* !__NetBSD__ */

#ifdef __NetBSD__
	pf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    pf_listener_cb, NULL);
#endif /* __NetBSD__ */
}
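/*
 * Module detach: stop the purge thread, unhook pfil, flush rulesets,
 * states, source nodes and tables, then tear down the pools and the
 * subsystems initialized in pfattach().
 */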
#ifdef _MODULE
void
pfdetach(void)
{
	extern int		 pf_purge_thread_running;
	extern int		 pf_purge_thread_stop;
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';

	pf_purge_thread_stop = 1;
	wakeup(pf_purge_thread);

	/* wait until the kthread exits */
	while (pf_purge_thread_running)
		tsleep(&pf_purge_thread_running, PWAIT, "pfdown", 0);

	(void)pf_pfil_detach();

	pf_status.running = 0;

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* ALTQ */

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC > 0
		state->sync_flags = PFSTATE_NOSYNC;
#endif /* NPFSYNC > 0 */
	}
	pf_purge_expired_states(pf_status.states);
#if NPFSYNC > 0
	pfsync_clear_states(pf_status.hostid, NULL);
#endif /* NPFSYNC > 0 */

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes(0);

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_key_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	rw_destroy(&pf_consistency_lock);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();

	/* cleanup kauth listener */
	kauth_unlisten_scope(pf_listener);
}
#endif /* _MODULE */

#ifndef __NetBSD__
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
#endif /* !__NetBSD__ */

int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}
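/*
 * pf_rm_rule() unlinks a rule from its queue and frees it once the
 * last state and source-node references are gone; called with a NULL
 * rulequeue it only drops the references of a never-linked rule.
 */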
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
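/*
 * Route labels are an OpenBSD feature; on NetBSD the rtlabel helpers
 * below compile to no-ops.
 */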
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif /* !__NetBSD__ */
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif /* !__NetBSD__ */
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif /* !__NetBSD__ */
}

#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
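/*
 * Commit the inactive ALTQ list built under `ticket': the active and
 * inactive lists are swapped at splsoftnet(), new disciplines are
 * attached, and the previously active list is torn down.
 */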
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}

int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}

int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */
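/*
 * Ruleset changes are transactional: pf_begin_rules() empties the
 * inactive list and hands out a ticket that pf_commit_rules() or
 * pf_rollback_rules() must present later.
 */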
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
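/*
 * The PF_MD5_* macros and pf_hash_rule() feed the significant rule
 * fields into an MD5 digest; pf_setup_pfsync_matching() uses the
 * result as the ruleset checksum compared by pfsync peers.
 */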
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
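/*
 * Make the inactive ruleset loaded under `ticket' the active one.
 * The pointer swap happens in a single splsoftnet() section; the
 * previous rules are purged afterwards.
 */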
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	int secs = time_second;
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;
}

void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = time_second;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
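/*
 * Recompute the ruleset checksum advertised via pfsync and build the
 * per-ruleset arrays of rule pointers indexed by rule number.
 */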
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(void *) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}
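/*
 * pfioctl() is the single entry point for pf(4) configuration.
 * Requests are vetted twice before dispatch: once against kauth(9)
 * (KAUTH_NETWORK_FIREWALL) and once against the descriptor's FWRITE
 * mode; read-only commands are whitelisted in both switches below.
 */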
int
pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		rw_enter_write(&pf_consistency_lock);
	else
		rw_enter_read(&pf_consistency_lock);

	s = splsoftnet();
	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __NetBSD__
			error = pf_pfil_attach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 1;
			pf_status.since = time_second;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time_second;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
#ifdef __NetBSD__
			error = pf_pfil_detach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 0;
			pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;
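	/*
	 * DIOCADDRULE appends one rule to an inactive ruleset; both the
	 * ruleset ticket and the address-pool ticket obtained through
	 * DIOCBEGINADDRS must match, or the request is rejected.
	 */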
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
#ifdef __NetBSD__
		rule->cuid = kauth_cred_getuid(l->l_cred);
		rule->cpid = l->l_proc->p_pid;
#else
		rule->cuid = p->p_cred->p_ruid;
		rule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

#ifndef __NetBSD__
		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
			error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
#if NPFLOG > 0
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
#endif
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
#ifdef __NetBSD__
			newrule->cuid = kauth_cred_getuid(l->l_cred);
			newrule->cpid = l->l_proc->p_pid;
#else
			newrule->cuid = p->p_cred->p_ruid;
			newrule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

#ifndef __NetBSD__
			if (newrule->rtableid > 0 &&
			    !rtable_exists(newrule->rtableid))
				error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if NPFLOG > 0
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}
	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_key	*sk;
		struct pf_state_host	*src, *dst;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    src->port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dst->port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
#if NPFSYNC > 0
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}
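	/*
	 * The state ioctls below convert struct pf_state to and from
	 * the wire format (struct pfsync_state) via pf_state_export()
	 * and pf_state_import(), the same representation used by
	 * pfsync.
	 */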
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = (struct pfsync_state *)ps->state;
		struct pf_state		*s;
		struct pf_state_key	*sk;
		struct pfi_kif		*kif;

		if (sp->timeout >= PFTM_MAX &&
		    sp->timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_NOWAIT);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s)) == NULL) {
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOMEM;
		}
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		u_int32_t		 nr;

		nr = 0;
		RB_FOREACH(s, pf_state_tree_id, &tree_id) {
			if (nr >= ps->nr)
				break;
			nr++;
		}
		if (s == NULL) {
			error = EBUSY;
			break;
		}

		pf_state_export((struct pfsync_state *)&ps->state,
		    s->state_key, s);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP);
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP);
		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		bcopy(&pf_status, s, sizeof(struct pf_status));
		pfi_fill_oldstatus(s);
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		if (ifunit(pi->ifname) == NULL) {
			error = EINVAL;
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = time_second;
		if (*pf_status.ifname)
			pfi_clr_istats(pf_status.ifname);
		break;
	}
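	/*
	 * DIOCNATLOOK resolves the NAT mapping behind an established
	 * connection; the caller's address pair is looked up in reverse
	 * since the state tree is keyed on the return traffic.
	 */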
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
				key.ext.port = pnl->dport;
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				key.gwy.port = pnl->sport;
				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				key.lan.port = pnl->dport;
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
				key.ext.port = pnl->sport;
				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af);
					pnl->rsport = sk->lan.port;
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					pnl->rdport = pnl->dport;
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af);
					pnl->rdport = sk->gwy.port;
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					pnl->rsport = pnl->sport;
				}
			} else
				error = ENOENT;
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		old = pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
#ifdef __NetBSD__
		pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0);
#else
		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0) != 0) {
			error = EBUSY;
			goto fail;
		}
#endif /* !__NetBSD__ */
		old_limit = pf_pool_limits[pl->index].limit;
		pf_pool_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		pf_status.debug = *level;
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

#ifdef ALTQ
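	/*
	 * The ALTQ ioctls follow the same transaction model as rules:
	 * queues are staged on the inactive list under a ticket and
	 * activated by the list swap in pf_commit_altq().
	 */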
#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq *altq;

		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 1;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq *altq;

		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 0;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQ: {
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq, *a;

		if (pa->ticket != ticket_altqs_inactive) {
			error = EBUSY;
			break;
		}
		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
		if (altq == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, altq);
				break;
			}
			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		error = altq_add(altq);
		if (error) {
			pool_put(&pf_altq_pl, altq);
			break;
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCGETALTQS: {
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq;

		pa->nr = 0;
		TAILQ_FOREACH(altq, pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = ticket_altqs_active;
		break;
	}

	case DIOCGETALTQ: {
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq;
		u_int32_t nr;

		if (pa->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}
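	/*
	 * queue retrieval is ticket based: DIOCGETALTQS hands out the
	 * entry count together with a ticket, and DIOCGETALTQ and
	 * DIOCGETQSTATS below reject a stale ticket with EBUSY so a
	 * changed active list is never indexed.
	 */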
	case DIOCCHANGEALTQ:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATS: {
		struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
		struct pf_altq *altq;
		u_int32_t nr;
		int nbytes;

		if (pq->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		error = altq_getqstats(altq, pq->buf, &nbytes);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pp->nr = 0;
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		u_int32_t nr = 0;

		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}
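	/*
	 * DIOCCHANGEADDR edits a rule's address pool in place:
	 * depending on pca->action a new entry is inserted at the
	 * head, at the tail, before/after the pca->nr'th element,
	 * or an existing element is removed.
	 */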
	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
#ifndef INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		/* the pool may be empty after removing the last entry */
		pool->cur = TAILQ_FIRST(&pool->list);
		if (pool->cur != NULL)
			PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
			    pca->af);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}
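	/*
	 * DIOCGETRULESET returns the name of the pr->nr'th child
	 * anchor under the given path; EBUSY signals an index beyond
	 * the last child.
	 */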
	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		break;
	}

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
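	/*
	 * the pfrio_esize checks reject userland binaries compiled
	 * against a different layout of the element structures; each
	 * per-address operation below insists on the exact element
	 * size it expects.
	 */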
	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}
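	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement atomic
	 * ruleset loads: every element of the transaction array opens
	 * an inactive ruleset (or table or altq set) and receives a
	 * ticket that has to be presented again at commit time.
	 */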
	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}
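	/*
	 * the commit runs in two passes: the first pass only checks
	 * tickets, so the transaction can still fail cleanly with
	 * EBUSY; once the second pass starts swapping rulesets in,
	 * an error leaves a partially committed configuration behind.
	 */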
	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		/* first make sure everything will succeed */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe->ticket !=
				    ticket_altqs_inactive) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *p, *pstore;
		u_int32_t nr = 0;
		int space = psn->psn_len;

		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int secs = time_second, diff;

			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
				break;

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				free(pstore, M_TEMP);
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		free(pstore, M_TEMP);
		break;
	}
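	/*
	 * src nodes are referenced from states, so they have to be
	 * detached from all states before being expired and purged.
	 */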
	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes(1);
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes(1);

		psnk->psnk_af = killed;
		break;
	}

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		if (*hostid == 0)
			pf_status.hostid = arc4random();
		else
			pf_status.hostid = *hostid;
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	splx(s);
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
	return (error);
}
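/*
 * On NetBSD pf is plugged into the network stack with pfil(9) hooks.
 * The wrappers below make the mbuf chain writable (pf modifies packets
 * in place) and complete delayed checksums on outbound packets before
 * handing them to pf_test()/pf_test6().
 */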
#ifdef __NetBSD__
#ifdef INET
static int
pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * ensure that mbufs are writable beforehand
	 * as it's assumed by pf code.
	 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}
	}

	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
	    != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	}

	/*
	 * we're not compatible with fast-forward.
	 */
	if (dir == PFIL_IN && *mp) {
		(*mp)->m_flags &= ~M_CANFASTFWD;
	}

	return (0);
}
#endif /* INET */

#ifdef INET6
static int
pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * ensure that mbufs are writable beforehand
	 * as it's assumed by pf code.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
		}
	}

	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
	    != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	} else
		return (0);
}
#endif /* INET6 */

static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif /* INET6 */
	int error;

	if (pf_pfil_attached)
		return (EBUSY);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		return (error);

#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad;
#endif /* INET6 */

	pf_pfil_attached = 1;

	return (0);

#ifdef INET6
bad:
	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
#endif /* INET6 */

	return (error);
}

static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif /* INET6 */

	if (pf_pfil_attached == 0)
		return (EBUSY);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif /* INET6 */
	pf_pfil_attached = 0;

	return (0);
}
#endif /* __NetBSD__ */

#if defined(__NetBSD__)
MODULE(MODULE_CLASS_DRIVER, pf, "bpf");

static int
pf_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	extern void pflogattach(int);
	extern void pflogdetach(void);

	devmajor_t cmajor = NODEVMAJOR, bmajor = NODEVMAJOR;
	int err;

	switch (cmd) {
	case MODULE_CMD_INIT:
		err = devsw_attach("pf", NULL, &bmajor, &pf_cdevsw, &cmajor);
		if (err)
			return err;
		pfattach(1);
		pflogattach(1);
		return 0;
	case MODULE_CMD_FINI:
		if (pf_status.running) {
			return EBUSY;
		} else {
			pfdetach();
			pflogdetach();
			return devsw_detach(NULL, &pf_cdevsw);
		}
	default:
		return ENOTTY;
	}
#else
	if (cmd == MODULE_CMD_INIT)
		return 0;
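	/* pf is built into the kernel: init trivially succeeds,
	 * every other module command is unsupported */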
	return ENOTTY;
#endif
}
#endif