/*	$NetBSD: pf_ioctl.c,v 1.43 2011/01/19 19:58:02 drochner Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.43 2011/01/19 19:58:02 drochner Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_pfil_hooks.h"
#endif

#include "pfsync.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/kauth.h>
#include <sys/module.h>
#endif /* __NetBSD__ */

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifndef __NetBSD__
#include <dev/rndvar.h>
#include <crypto/md5.h>
#else
#include <sys/md5.h>
#endif /* __NetBSD__ */
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif /* NPFLOG > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

void		 pfattach(int);
#ifdef _MODULE
void		 pfdetach(void);
#endif /* _MODULE */
#ifndef __NetBSD__
void		 pf_thread_create(void *);
#endif /* !__NetBSD__ */
int		 pfopen(dev_t, int, int, struct lwp *);
int		 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool	*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
		    u_int8_t, u_int8_t, u_int8_t);

void		 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void		 pf_empty_pool(struct pf_palist *);
int		 pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int		 pf_begin_altq(u_int32_t *);
int		 pf_rollback_altq(u_int32_t);
int		 pf_commit_altq(u_int32_t);
int		 pf_enable_altq(struct pf_altq *);
int		 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int		 pf_begin_rules(u_int32_t *, int, const char *);
int		 pf_rollback_rules(u_int32_t, int, char *);
int		 pf_setup_pfsync_matching(struct pf_ruleset *);
void		 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int		 pf_commit_rules(u_int32_t, int, char *);
void		 pf_state_export(struct pfsync_state *,
		    struct pf_state_key *, struct pf_state *);
void		 pf_state_import(struct pfsync_state *,
		    struct pf_state_key *, struct pf_state *);

static int	 pf_state_add(struct pfsync_state *);

struct pf_rule		 pf_default_rule;
#ifdef __NetBSD__
krwlock_t		 pf_consistency_lock;
#else
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#endif /* __NetBSD__ */
#ifdef ALTQ
static int		 pf_altq_running;
#endif

int			 pf_state_lock = 0;

#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t	 tagname2tag(struct pf_tags *, char *);
void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
void		 tag_unref(struct pf_tags *, u_int16_t);
int		 pf_rtlabel_add(struct pf_addr_wrap *);
void		 pf_rtlabel_remove(struct pf_addr_wrap *);
void		 pf_rtlabel_copyout(struct pf_addr_wrap *);

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
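
/*
 * DPFPRINTF expands to an unbraced "if", so callers wrap the printf
 * arguments in an extra set of parentheses, e.g.
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 *
 * which prints only when pf_status.debug is at least the given level.
 */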

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};

static int pfil4_wrapper(void *, struct mbuf **, struct ifnet *, int);
#ifdef INET6
static int pfil6_wrapper(void *, struct mbuf **, struct ifnet *, int);
#endif /* INET6 */

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached;

static kauth_listener_t pf_listener;
#endif /* __NetBSD__ */

#ifdef __NetBSD__
static int
pf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)arg0;

	if (action != KAUTH_NETWORK_FIREWALL)
		return result;

	/* These must have come from device context. */
	if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
	    (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
		result = KAUTH_RESULT_ALLOW;

	return result;
}
#endif /* __NetBSD__ */

void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

#ifdef __NetBSD__
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL, IPL_SOFTNET);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL, IPL_SOFTNET);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL, IPL_SOFTNET);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */

	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

#ifdef __NetBSD__
	rw_init(&pf_consistency_lock);
#endif /* __NetBSD__ */

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
#ifdef __NetBSD__
	if (kthread_create(PRI_NONE, 0, NULL, pf_purge_thread, NULL, NULL,
	    "pfpurge"))
		panic("pfpurge thread");
#else
	kthread_create_deferred(pf_thread_create, NULL);
#endif /* !__NetBSD__ */

#ifdef __NetBSD__
	pf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    pf_listener_cb, NULL);
#endif /* __NetBSD__ */
}
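
/*
 * Module detach undoes pfattach() in roughly reverse order: stop the
 * purge thread, unhook from pfil, flush rules, states, source nodes
 * and tables, then tear down the pools and subsystems.  The empty
 * anchor name (r == '\0') below selects the main ruleset.
 */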

#ifdef _MODULE
void
pfdetach(void)
{
	extern int		 pf_purge_thread_running;
	extern int		 pf_purge_thread_stop;
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';

	pf_purge_thread_stop = 1;
	wakeup(pf_purge_thread);

	/* wait until the kthread exits */
	while (pf_purge_thread_running)
		tsleep(&pf_purge_thread_running, PWAIT, "pfdown", 0);

	(void)pf_pfil_detach();

	pf_status.running = 0;

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* ALTQ */

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC > 0
		state->sync_flags = PFSTATE_NOSYNC;
#endif /* NPFSYNC > 0 */
	}
	pf_purge_expired_states(pf_status.states);
#if NPFSYNC > 0
	pfsync_clear_states(pf_status.hostid, NULL);
#endif /* NPFSYNC > 0 */

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes(0);

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_key_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	rw_destroy(&pf_consistency_lock);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();

	/* cleanup kauth listener */
	kauth_unlisten_scope(pf_listener);
}
#endif /* _MODULE */

#ifndef __NetBSD__
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
#endif /* !__NetBSD__ */

int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
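
/*
 * Example of the slot search above: with tags 1, 2 and 4 allocated,
 * the scan advances new_tagid to 3 and stops at the entry holding 4,
 * so 3 is reused and inserted before it, keeping the list sorted.
 * With 1, 2 and 3 allocated, p runs off the end and 4 is appended at
 * the tail.
 */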

void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif /* !__NetBSD__ */
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif /* !__NetBSD__ */
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif /* !__NetBSD__ */
}

#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
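
/*
 * Altq lists follow the same two-phase model as the rulesets: a
 * candidate list is staged on pf_altqs_inactive under a ticket, and
 * pf_commit_altq() swaps the active and inactive lists at splsoftnet.
 * Rollback and commit both reject a stale ticket, so an interrupted
 * or superseded transaction cannot be committed.
 */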

int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}

int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
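
/*
 * The token bucket regulator set up above paces if_snd at
 * altq->ifbandwidth, with tbrsize bounding how much may burst out at
 * once; pf_disable_altq() below clears it again by programming a
 * zero rate.
 */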

int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one and we can just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */

int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));	\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));	\
} while (0)
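
/*
 * The _HTONL/_HTONS variants hash multi-byte fields in network byte
 * order, so the ruleset checksum computed in pf_hash_rule() comes out
 * identical on little- and big-endian hosts and pfsync peers can
 * compare checksums directly.
 */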

void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
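
/*
 * pf_begin_rules() and pf_commit_rules() bracket a ruleset
 * transaction: begin flushes the inactive list and hands out a fresh
 * ticket, rules are then staged onto the inactive list (see
 * DIOCADDRULE below), and commit re-checks the ticket, swaps active
 * and inactive under splsoftnet and purges the old list.  A stale
 * ticket fails with EBUSY, so two loaders cannot commit each other's
 * half-built rulesets.
 */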

void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	int secs = time_second;

	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;
}

void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->rule.ptr->states++;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = time_second;
	s->expire = time_second;
	s->timeout = sp->timeout;
	if (sp->expire > 0)
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
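
/*
 * State expiry crosses pfsync as "seconds remaining": pf_state_export()
 * converts the absolute expiry into a remaining time, and
 * pf_state_import() reconstructs an absolute expiry by backdating.
 * For example, with a 60 second default timeout and 20 seconds left,
 * import sets s->expire = time_second - (60 - 20), so the imported
 * state again expires 20 seconds from now.
 */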

int
pf_state_add(struct pfsync_state *sp)
{
	struct pf_state		*s;
	struct pf_state_key	*sk;
	struct pfi_kif		*kif;

	if (sp->timeout >= PFTM_MAX &&
	    sp->timeout != PFTM_UNTIL_PACKET) {
		return EINVAL;
	}
	s = pool_get(&pf_state_pl, PR_NOWAIT);
	if (s == NULL) {
		return ENOMEM;
	}
	bzero(s, sizeof(struct pf_state));
	if ((sk = pf_alloc_state_key(s)) == NULL) {
		pool_put(&pf_state_pl, s);
		return ENOMEM;
	}
	pf_state_import(sp, sk, s);
	kif = pfi_kif_get(sp->ifname);
	if (kif == NULL) {
		pool_put(&pf_state_pl, s);
		pool_put(&pf_state_key_pl, sk);
		return ENOENT;
	}
	if (pf_insert_state(kif, s)) {
		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
		pool_put(&pf_state_pl, s);
		return ENOMEM;
	}

	return 0;
}

int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(void *) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}
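
/*
 * pfioctl() below gates each command three ways before acting on it:
 * a kauth(9) authorization on NetBSD (callers that fail it may use
 * only the read-mostly subset; table writes pass only as
 * PFR_FLAG_DUMMY no-ops), a check of the FWRITE mode /dev/pf was
 * opened with, and pf_consistency_lock, taken for writing when the
 * command may modify pf state and for reading otherwise.
 */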

int
pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
		case DIOCSETLCK:
		case DIOCADDSTATES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETLCK:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
		case DIOCADDSTATES:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		rw_enter_write(&pf_consistency_lock);
	else
		rw_enter_read(&pf_consistency_lock);

	s = splsoftnet();
	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __NetBSD__
			error = pf_pfil_attach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 1;
			pf_status.since = time_second;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time_second;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
#ifdef __NetBSD__
			error = pf_pfil_detach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 0;
			pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;

	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
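		/*
		 * Two tickets guard the add that follows: pr->ticket must
		 * match the open inactive ruleset (issued by
		 * pf_begin_rules()), and pr->pool_ticket must match
		 * ticket_pabuf, covering the pool addresses staged into
		 * pf_pabuf beforehand (via DIOCBEGINADDRS/DIOCADDADDR,
		 * outside this excerpt).
		 */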
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
#ifdef __NetBSD__
		rule->cuid = kauth_cred_getuid(l->l_cred);
		rule->cpid = l->l_proc->p_pid;
#else
		rule->cuid = p->p_cred->p_ruid;
		rule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

#ifndef __NetBSD__
		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
			error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
#if NPFLOG > 0
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
#endif
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		rule->overload_tbl = NULL;
		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}
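
	/*
	 * Typical userland walk of a ruleset (illustrative sketch, error
	 * handling omitted): DIOCGETRULES reports the rule count and a
	 * ticket, then each rule is fetched by number under that ticket:
	 *
	 *	struct pfioc_rule pr;
	 *	u_int32_t i, max;
	 *
	 *	memset(&pr, 0, sizeof(pr));
	 *	ioctl(fd, DIOCGETRULES, &pr);
	 *	max = pr.nr;
	 *	for (i = 0; i < max; i++) {
	 *		pr.nr = i;
	 *		ioctl(fd, DIOCGETRULE, &pr);
	 *	}
	 *
	 * If the ruleset changes meanwhile, the stale ticket makes
	 * DIOCGETRULE fail with EBUSY and the walk starts over.
	 */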

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
#ifdef __NetBSD__
			newrule->cuid = kauth_cred_getuid(l->l_cred);
			newrule->cpid = l->l_proc->p_pid;
#else
			newrule->cuid = p->p_cred->p_ruid;
			newrule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

#ifndef __NetBSD__
			if (newrule->rtableid > 0 &&
			    !rtable_exists(newrule->rtableid))
				error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if NPFLOG > 0
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}

	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_key	*sk;
		struct pf_state_host	*src, *dst;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    src->port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dst->port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
#if NPFSYNC > 0
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}
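
	/*
	 * Both ioctls above reuse psk->psk_af on the way out to return
	 * the number of states killed.  The src/dst flip in
	 * DIOCKILLSTATES reflects the state key layout: for a PF_OUT
	 * state the original (src, dst) pair lives in (lan, ext), for a
	 * PF_IN state in (ext, lan).
	 */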

	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = (struct pfsync_state *)ps->state;

		error = pf_state_add(sp);
		break;
	}

	case DIOCADDSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pfsync_state	*p = (struct pfsync_state *)ps->ps_states;
		struct pfsync_state	*pk;
		int			 size = ps->ps_len;
		int			 i = 0;

		error = 0;
		pk = malloc(sizeof(*pk), M_TEMP, M_WAITOK);

		while (error == 0 && i < size) {
			if (copyin(p, pk, sizeof(struct pfsync_state)))
				error = EFAULT;
			else {
				error = pf_state_add(pk);
				i += sizeof(*p);
				p++;
			}
		}

		/* pk is freed exactly once, whether or not the loop failed */
		free(pk, M_TEMP);
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		u_int32_t		 nr;

		nr = 0;
		RB_FOREACH(s, pf_state_tree_id, &tree_id) {
			if (nr >= ps->nr)
				break;
			nr++;
		}
		if (s == NULL) {
			error = EBUSY;
			break;
		}

		pf_state_export((struct pfsync_state *)&ps->state,
		    s->state_key, s);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP);
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP);
		break;
	}
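
	/*
	 * DIOCGETSTATES uses the usual two-call sizing pattern
	 * (illustrative sketch, error handling omitted):
	 *
	 *	struct pfioc_states ps;
	 *
	 *	memset(&ps, 0, sizeof(ps));
	 *	ioctl(fd, DIOCGETSTATES, &ps);	// ps.ps_len = bytes needed
	 *	ps.ps_states = malloc(ps.ps_len);
	 *	ioctl(fd, DIOCGETSTATES, &ps);	// fills states, resets ps_len
	 *
	 * The state count can grow between the two calls, so ps_len is
	 * checked again on return.
	 */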
2063 */ 2064 if (direction == PF_IN) { 2065 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 2066 key.ext.port = pnl->dport; 2067 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 2068 key.gwy.port = pnl->sport; 2069 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 2070 } else { 2071 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 2072 key.lan.port = pnl->dport; 2073 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 2074 key.ext.port = pnl->sport; 2075 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 2076 } 2077 if (m > 1) 2078 error = E2BIG; /* more than one state */ 2079 else if (state != NULL) { 2080 sk = state->state_key; 2081 if (direction == PF_IN) { 2082 PF_ACPY(&pnl->rsaddr, &sk->lan.addr, 2083 sk->af); 2084 pnl->rsport = sk->lan.port; 2085 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 2086 pnl->af); 2087 pnl->rdport = pnl->dport; 2088 } else { 2089 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr, 2090 sk->af); 2091 pnl->rdport = sk->gwy.port; 2092 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 2093 pnl->af); 2094 pnl->rsport = pnl->sport; 2095 } 2096 } else 2097 error = ENOENT; 2098 } 2099 break; 2100 } 2101 2102 case DIOCSETTIMEOUT: { 2103 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2104 int old; 2105 2106 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2107 pt->seconds < 0) { 2108 error = EINVAL; 2109 goto fail; 2110 } 2111 old = pf_default_rule.timeout[pt->timeout]; 2112 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 2113 pt->seconds = 1; 2114 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2115 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 2116 wakeup(pf_purge_thread); 2117 pt->seconds = old; 2118 break; 2119 } 2120 2121 case DIOCGETTIMEOUT: { 2122 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2123 2124 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2125 error = EINVAL; 2126 goto fail; 2127 } 2128 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2129 break; 2130 } 2131 2132 case DIOCGETLIMIT: { 2133 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2134 2135 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2136 error = EINVAL; 2137 goto fail; 2138 } 2139 pl->limit = pf_pool_limits[pl->index].limit; 2140 break; 2141 } 2142 2143 case DIOCSETLIMIT: { 2144 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2145 int old_limit; 2146 2147 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2148 pf_pool_limits[pl->index].pp == NULL) { 2149 error = EINVAL; 2150 goto fail; 2151 } 2152 #ifdef __NetBSD__ 2153 pool_sethardlimit(pf_pool_limits[pl->index].pp, 2154 pl->limit, NULL, 0); 2155 #else 2156 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2157 pl->limit, NULL, 0) != 0) { 2158 error = EBUSY; 2159 goto fail; 2160 } 2161 #endif /* !__NetBSD__ */ 2162 old_limit = pf_pool_limits[pl->index].limit; 2163 pf_pool_limits[pl->index].limit = pl->limit; 2164 pl->limit = old_limit; 2165 break; 2166 } 2167 2168 case DIOCSETDEBUG: { 2169 u_int32_t *level = (u_int32_t *)addr; 2170 2171 pf_status.debug = *level; 2172 break; 2173 } 2174 2175 case DIOCCLRRULECTRS: { 2176 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 2177 struct pf_ruleset *ruleset = &pf_main_ruleset; 2178 struct pf_rule *rule; 2179 2180 TAILQ_FOREACH(rule, 2181 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 2182 rule->evaluations = 0; 2183 rule->packets[0] = rule->packets[1] = 0; 2184 rule->bytes[0] = rule->bytes[1] = 0; 2185 } 2186 break; 2187 } 2188 2189 #ifdef ALTQ 2190 case DIOCSTARTALTQ: { 2191 struct pf_altq *altq; 2192 2193 /* enable all altq interfaces on active list */ 2194 TAILQ_FOREACH(altq, pf_altqs_active, 
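/*
 * Sketch (illustrative only): querying the DIOCNATLOOK handler above
 * from userland, in the style of a transparent proxy.  The caller
 * describes the connection as it sees it; pf answers with the
 * pre-translation endpoint in rsaddr/rsport.  client_sin/local_sin
 * are hypothetical sockaddr_in values, and the pf_addr field names
 * follow net/pfvar.h.
 *
 *	struct pfioc_natlook nl;
 *
 *	memset(&nl, 0, sizeof(nl));
 *	nl.af = AF_INET;
 *	nl.proto = IPPROTO_TCP;
 *	nl.direction = PF_IN;
 *	nl.saddr.v4 = client_sin.sin_addr;
 *	nl.sport = client_sin.sin_port;
 *	nl.daddr.v4 = local_sin.sin_addr;
 *	nl.dport = local_sin.sin_port;
 *	if (ioctl(dev, DIOCNATLOOK, &nl) == -1)
 *		err(1, "DIOCNATLOOK");
 *	connect_to(nl.rsaddr.v4, nl.rsport);	(hypothetical helper)
 */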
entries) { 2195 if (altq->qname[0] == 0) { 2196 error = pf_enable_altq(altq); 2197 if (error != 0) 2198 break; 2199 } 2200 } 2201 if (error == 0) 2202 pf_altq_running = 1; 2203 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2204 break; 2205 } 2206 2207 case DIOCSTOPALTQ: { 2208 struct pf_altq *altq; 2209 2210 /* disable all altq interfaces on active list */ 2211 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2212 if (altq->qname[0] == 0) { 2213 error = pf_disable_altq(altq); 2214 if (error != 0) 2215 break; 2216 } 2217 } 2218 if (error == 0) 2219 pf_altq_running = 0; 2220 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2221 break; 2222 } 2223 2224 case DIOCADDALTQ: { 2225 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2226 struct pf_altq *altq, *a; 2227 2228 if (pa->ticket != ticket_altqs_inactive) { 2229 error = EBUSY; 2230 break; 2231 } 2232 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2233 if (altq == NULL) { 2234 error = ENOMEM; 2235 break; 2236 } 2237 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2238 2239 /* 2240 * if this is for a queue, find the discipline and 2241 * copy the necessary fields 2242 */ 2243 if (altq->qname[0] != 0) { 2244 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2245 error = EBUSY; 2246 pool_put(&pf_altq_pl, altq); 2247 break; 2248 } 2249 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2250 if (strncmp(a->ifname, altq->ifname, 2251 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2252 altq->altq_disc = a->altq_disc; 2253 break; 2254 } 2255 } 2256 } 2257 2258 error = altq_add(altq); 2259 if (error) { 2260 pool_put(&pf_altq_pl, altq); 2261 break; 2262 } 2263 2264 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2265 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2266 break; 2267 } 2268 2269 case DIOCGETALTQS: { 2270 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2271 struct pf_altq *altq; 2272 2273 pa->nr = 0; 2274 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2275 pa->nr++; 2276 pa->ticket = ticket_altqs_active; 2277 break; 2278 } 2279 2280 case DIOCGETALTQ: { 2281 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2282 struct pf_altq *altq; 2283 u_int32_t nr; 2284 2285 if (pa->ticket != ticket_altqs_active) { 2286 error = EBUSY; 2287 break; 2288 } 2289 nr = 0; 2290 altq = TAILQ_FIRST(pf_altqs_active); 2291 while ((altq != NULL) && (nr < pa->nr)) { 2292 altq = TAILQ_NEXT(altq, entries); 2293 nr++; 2294 } 2295 if (altq == NULL) { 2296 error = EBUSY; 2297 break; 2298 } 2299 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2300 break; 2301 } 2302 2303 case DIOCCHANGEALTQ: 2304 /* CHANGEALTQ not supported yet! 
*/ 2305 error = ENODEV; 2306 break; 2307 2308 case DIOCGETQSTATS: { 2309 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2310 struct pf_altq *altq; 2311 u_int32_t nr; 2312 int nbytes; 2313 2314 if (pq->ticket != ticket_altqs_active) { 2315 error = EBUSY; 2316 break; 2317 } 2318 nbytes = pq->nbytes; 2319 nr = 0; 2320 altq = TAILQ_FIRST(pf_altqs_active); 2321 while ((altq != NULL) && (nr < pq->nr)) { 2322 altq = TAILQ_NEXT(altq, entries); 2323 nr++; 2324 } 2325 if (altq == NULL) { 2326 error = EBUSY; 2327 break; 2328 } 2329 error = altq_getqstats(altq, pq->buf, &nbytes); 2330 if (error == 0) { 2331 pq->scheduler = altq->scheduler; 2332 pq->nbytes = nbytes; 2333 } 2334 break; 2335 } 2336 #endif /* ALTQ */ 2337 2338 case DIOCBEGINADDRS: { 2339 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2340 2341 pf_empty_pool(&pf_pabuf); 2342 pp->ticket = ++ticket_pabuf; 2343 break; 2344 } 2345 2346 case DIOCADDADDR: { 2347 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2348 2349 if (pp->ticket != ticket_pabuf) { 2350 error = EBUSY; 2351 break; 2352 } 2353 #ifndef INET 2354 if (pp->af == AF_INET) { 2355 error = EAFNOSUPPORT; 2356 break; 2357 } 2358 #endif /* INET */ 2359 #ifndef INET6 2360 if (pp->af == AF_INET6) { 2361 error = EAFNOSUPPORT; 2362 break; 2363 } 2364 #endif /* INET6 */ 2365 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2366 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2367 pp->addr.addr.type != PF_ADDR_TABLE) { 2368 error = EINVAL; 2369 break; 2370 } 2371 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2372 if (pa == NULL) { 2373 error = ENOMEM; 2374 break; 2375 } 2376 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2377 if (pa->ifname[0]) { 2378 pa->kif = pfi_kif_get(pa->ifname); 2379 if (pa->kif == NULL) { 2380 pool_put(&pf_pooladdr_pl, pa); 2381 error = EINVAL; 2382 break; 2383 } 2384 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); 2385 } 2386 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2387 pfi_dynaddr_remove(&pa->addr); 2388 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); 2389 pool_put(&pf_pooladdr_pl, pa); 2390 error = EINVAL; 2391 break; 2392 } 2393 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2394 break; 2395 } 2396 2397 case DIOCGETADDRS: { 2398 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2399 2400 pp->nr = 0; 2401 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2402 pp->r_num, 0, 1, 0); 2403 if (pool == NULL) { 2404 error = EBUSY; 2405 break; 2406 } 2407 TAILQ_FOREACH(pa, &pool->list, entries) 2408 pp->nr++; 2409 break; 2410 } 2411 2412 case DIOCGETADDR: { 2413 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2414 u_int32_t nr = 0; 2415 2416 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2417 pp->r_num, 0, 1, 1); 2418 if (pool == NULL) { 2419 error = EBUSY; 2420 break; 2421 } 2422 pa = TAILQ_FIRST(&pool->list); 2423 while ((pa != NULL) && (nr < pp->nr)) { 2424 pa = TAILQ_NEXT(pa, entries); 2425 nr++; 2426 } 2427 if (pa == NULL) { 2428 error = EBUSY; 2429 break; 2430 } 2431 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2432 pfi_dynaddr_copyout(&pp->addr.addr); 2433 pf_tbladdr_copyout(&pp->addr.addr); 2434 pf_rtlabel_copyout(&pp->addr.addr); 2435 break; 2436 } 2437 2438 case DIOCCHANGEADDR: { 2439 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2440 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2441 struct pf_ruleset *ruleset; 2442 2443 if (pca->action < PF_CHANGE_ADD_HEAD || 2444 pca->action > PF_CHANGE_REMOVE) { 2445 error = EINVAL; 2446 break; 2447 } 2448 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2449 
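/*
 * Sketch (illustrative only): enumerating a rule's address pool with
 * the DIOCGETADDRS/DIOCGETADDR pair above.  The first call returns
 * the entry count; each DIOCGETADDR then fetches one entry by index.
 * The ticket is assumed to have been obtained for the owning rule
 * beforehand (e.g. from DIOCGETRULES), and PF_NAT/r_num are
 * hypothetical choices.
 *
 *	struct pfioc_pooladdr pp;
 *	u_int32_t mnr, i;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	pp.r_action = PF_NAT;
 *	pp.r_num = 0;
 *	pp.ticket = ticket;
 *	if (ioctl(dev, DIOCGETADDRS, &pp) == -1)
 *		err(1, "DIOCGETADDRS");
 *	mnr = pp.nr;
 *	for (i = 0; i < mnr; i++) {
 *		pp.nr = i;
 *		if (ioctl(dev, DIOCGETADDR, &pp) == -1)
 *			err(1, "DIOCGETADDR");
 *		print_pooladdr(&pp.addr);	(hypothetical helper)
 *	}
 */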
pca->addr.addr.type != PF_ADDR_DYNIFTL && 2450 pca->addr.addr.type != PF_ADDR_TABLE) { 2451 error = EINVAL; 2452 break; 2453 } 2454 2455 ruleset = pf_find_ruleset(pca->anchor); 2456 if (ruleset == NULL) { 2457 error = EBUSY; 2458 break; 2459 } 2460 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2461 pca->r_num, pca->r_last, 1, 1); 2462 if (pool == NULL) { 2463 error = EBUSY; 2464 break; 2465 } 2466 if (pca->action != PF_CHANGE_REMOVE) { 2467 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2468 if (newpa == NULL) { 2469 error = ENOMEM; 2470 break; 2471 } 2472 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2473 #ifndef INET 2474 if (pca->af == AF_INET) { 2475 pool_put(&pf_pooladdr_pl, newpa); 2476 error = EAFNOSUPPORT; 2477 break; 2478 } 2479 #endif /* INET */ 2480 #ifndef INET6 2481 if (pca->af == AF_INET6) { 2482 pool_put(&pf_pooladdr_pl, newpa); 2483 error = EAFNOSUPPORT; 2484 break; 2485 } 2486 #endif /* INET6 */ 2487 if (newpa->ifname[0]) { 2488 newpa->kif = pfi_kif_get(newpa->ifname); 2489 if (newpa->kif == NULL) { 2490 pool_put(&pf_pooladdr_pl, newpa); 2491 error = EINVAL; 2492 break; 2493 } 2494 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); 2495 } else 2496 newpa->kif = NULL; 2497 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2498 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2499 pfi_dynaddr_remove(&newpa->addr); 2500 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); 2501 pool_put(&pf_pooladdr_pl, newpa); 2502 error = EINVAL; 2503 break; 2504 } 2505 } 2506 2507 if (pca->action == PF_CHANGE_ADD_HEAD) 2508 oldpa = TAILQ_FIRST(&pool->list); 2509 else if (pca->action == PF_CHANGE_ADD_TAIL) 2510 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2511 else { 2512 int i = 0; 2513 2514 oldpa = TAILQ_FIRST(&pool->list); 2515 while ((oldpa != NULL) && (i < pca->nr)) { 2516 oldpa = TAILQ_NEXT(oldpa, entries); 2517 i++; 2518 } 2519 if (oldpa == NULL) { /* don't leak newpa; undo the setup done above */ if (newpa != NULL) { pfi_dynaddr_remove(&newpa->addr); pf_tbladdr_remove(&newpa->addr); pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); pool_put(&pf_pooladdr_pl, newpa); } 2520 error = EINVAL; 2521 break; 2522 } 2523 } 2524 2525 if (pca->action == PF_CHANGE_REMOVE) { 2526 TAILQ_REMOVE(&pool->list, oldpa, entries); 2527 pfi_dynaddr_remove(&oldpa->addr); 2528 pf_tbladdr_remove(&oldpa->addr); 2529 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); 2530 pool_put(&pf_pooladdr_pl, oldpa); 2531 } else { 2532 if (oldpa == NULL) 2533 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2534 else if (pca->action == PF_CHANGE_ADD_HEAD || 2535 pca->action == PF_CHANGE_ADD_BEFORE) 2536 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2537 else 2538 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2539 newpa, entries); 2540 } 2541 2542 pool->cur = TAILQ_FIRST(&pool->list); 2543 if (pool->cur != NULL) /* the pool may now be empty */ PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2544 pca->af); 2545 break; 2546 } 2547 2548 case DIOCGETRULESETS: { 2549 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2550 struct pf_ruleset *ruleset; 2551 struct pf_anchor *anchor; 2552 2553 pr->path[sizeof(pr->path) - 1] = 0; 2554 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2555 error = EINVAL; 2556 break; 2557 } 2558 pr->nr = 0; 2559 if (ruleset->anchor == NULL) { 2560 /* XXX kludge for pf_main_ruleset */ 2561 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2562 if (anchor->parent == NULL) 2563 pr->nr++; 2564 } else { 2565 RB_FOREACH(anchor, pf_anchor_node, 2566 &ruleset->anchor->children) 2567 pr->nr++; 2568 } 2569 break; 2570 } 2571 2572 case DIOCGETRULESET: { 2573 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2574 struct pf_ruleset *ruleset; 2575 struct pf_anchor *anchor; 2576 u_int32_t nr = 0; 2577 2578 pr->path[sizeof(pr->path) - 1] = 0; 2579 if ((ruleset = pf_find_ruleset(pr->path)) ==
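/*
 * Sketch (illustrative only): walking the anchor tree with the
 * DIOCGETRULESETS/DIOCGETRULESET handlers here.  An empty path names
 * the main ruleset; pr.nr selects one child anchor per call.
 *
 *	struct pfioc_ruleset pr;
 *	u_int32_t mnr, i;
 *
 *	memset(&pr, 0, sizeof(pr));
 *	pr.path[0] = '\0';
 *	if (ioctl(dev, DIOCGETRULESETS, &pr) == -1)
 *		err(1, "DIOCGETRULESETS");
 *	mnr = pr.nr;
 *	for (i = 0; i < mnr; i++) {
 *		pr.nr = i;
 *		if (ioctl(dev, DIOCGETRULESET, &pr) == -1)
 *			err(1, "DIOCGETRULESET");
 *		printf("anchor \"%s\"\n", pr.name);
 *	}
 */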
NULL) { 2580 error = EINVAL; 2581 break; 2582 } 2583 pr->name[0] = 0; 2584 if (ruleset->anchor == NULL) { 2585 /* XXX kludge for pf_main_ruleset */ 2586 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2587 if (anchor->parent == NULL && nr++ == pr->nr) { 2588 strlcpy(pr->name, anchor->name, 2589 sizeof(pr->name)); 2590 break; 2591 } 2592 } else { 2593 RB_FOREACH(anchor, pf_anchor_node, 2594 &ruleset->anchor->children) 2595 if (nr++ == pr->nr) { 2596 strlcpy(pr->name, anchor->name, 2597 sizeof(pr->name)); 2598 break; 2599 } 2600 } 2601 if (!pr->name[0]) 2602 error = EBUSY; 2603 break; 2604 } 2605 2606 case DIOCRCLRTABLES: { 2607 struct pfioc_table *io = (struct pfioc_table *)addr; 2608 2609 if (io->pfrio_esize != 0) { 2610 error = ENODEV; 2611 break; 2612 } 2613 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2614 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2615 break; 2616 } 2617 2618 case DIOCRADDTABLES: { 2619 struct pfioc_table *io = (struct pfioc_table *)addr; 2620 2621 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2622 error = ENODEV; 2623 break; 2624 } 2625 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2626 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2627 break; 2628 } 2629 2630 case DIOCRDELTABLES: { 2631 struct pfioc_table *io = (struct pfioc_table *)addr; 2632 2633 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2634 error = ENODEV; 2635 break; 2636 } 2637 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2638 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2639 break; 2640 } 2641 2642 case DIOCRGETTABLES: { 2643 struct pfioc_table *io = (struct pfioc_table *)addr; 2644 2645 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2646 error = ENODEV; 2647 break; 2648 } 2649 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2650 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2651 break; 2652 } 2653 2654 case DIOCRGETTSTATS: { 2655 struct pfioc_table *io = (struct pfioc_table *)addr; 2656 2657 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2658 error = ENODEV; 2659 break; 2660 } 2661 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2662 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2663 break; 2664 } 2665 2666 case DIOCRCLRTSTATS: { 2667 struct pfioc_table *io = (struct pfioc_table *)addr; 2668 2669 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2670 error = ENODEV; 2671 break; 2672 } 2673 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2674 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2675 break; 2676 } 2677 2678 case DIOCRSETTFLAGS: { 2679 struct pfioc_table *io = (struct pfioc_table *)addr; 2680 2681 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2682 error = ENODEV; 2683 break; 2684 } 2685 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2686 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2687 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2688 break; 2689 } 2690 2691 case DIOCRCLRADDRS: { 2692 struct pfioc_table *io = (struct pfioc_table *)addr; 2693 2694 if (io->pfrio_esize != 0) { 2695 error = ENODEV; 2696 break; 2697 } 2698 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2699 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2700 break; 2701 } 2702 2703 case DIOCRADDADDRS: { 2704 struct pfioc_table *io = (struct pfioc_table *)addr; 2705 2706 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2707 error = ENODEV; 2708 break; 2709 } 2710 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2711 io->pfrio_size, &io->pfrio_nadd, 
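/*
 * Sketch (illustrative only): the caller side of these table ioctls
 * (DIOCRADDADDRS and friends).  As the pfrio_esize checks show,
 * userland must pass the element size it was compiled with so kernel
 * and caller agree on the buffer layout.  The table name here is
 * hypothetical.
 *
 *	struct pfioc_table io;
 *	struct pfr_addr ad;
 *
 *	memset(&io, 0, sizeof(io));
 *	memset(&ad, 0, sizeof(ad));
 *	strlcpy(io.pfrio_table.pfrt_name, "badhosts",
 *	    sizeof(io.pfrio_table.pfrt_name));
 *	ad.pfra_af = AF_INET;
 *	ad.pfra_net = 32;
 *	inet_pton(AF_INET, "192.0.2.1", &ad.pfra_ip4addr);
 *	io.pfrio_buffer = &ad;
 *	io.pfrio_esize = sizeof(ad);
 *	io.pfrio_size = 1;
 *	if (ioctl(dev, DIOCRADDADDRS, &io) == -1)
 *		err(1, "DIOCRADDADDRS");
 */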
io->pfrio_flags | 2712 PFR_FLAG_USERIOCTL); 2713 break; 2714 } 2715 2716 case DIOCRDELADDRS: { 2717 struct pfioc_table *io = (struct pfioc_table *)addr; 2718 2719 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2720 error = ENODEV; 2721 break; 2722 } 2723 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2724 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2725 PFR_FLAG_USERIOCTL); 2726 break; 2727 } 2728 2729 case DIOCRSETADDRS: { 2730 struct pfioc_table *io = (struct pfioc_table *)addr; 2731 2732 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2733 error = ENODEV; 2734 break; 2735 } 2736 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2737 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2738 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2739 PFR_FLAG_USERIOCTL, 0); 2740 break; 2741 } 2742 2743 case DIOCRGETADDRS: { 2744 struct pfioc_table *io = (struct pfioc_table *)addr; 2745 2746 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2747 error = ENODEV; 2748 break; 2749 } 2750 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2751 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2752 break; 2753 } 2754 2755 case DIOCRGETASTATS: { 2756 struct pfioc_table *io = (struct pfioc_table *)addr; 2757 2758 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2759 error = ENODEV; 2760 break; 2761 } 2762 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2763 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2764 break; 2765 } 2766 2767 case DIOCRCLRASTATS: { 2768 struct pfioc_table *io = (struct pfioc_table *)addr; 2769 2770 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2771 error = ENODEV; 2772 break; 2773 } 2774 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2775 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2776 PFR_FLAG_USERIOCTL); 2777 break; 2778 } 2779 2780 case DIOCRTSTADDRS: { 2781 struct pfioc_table *io = (struct pfioc_table *)addr; 2782 2783 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2784 error = ENODEV; 2785 break; 2786 } 2787 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2788 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2789 PFR_FLAG_USERIOCTL); 2790 break; 2791 } 2792 2793 case DIOCRINADEFINE: { 2794 struct pfioc_table *io = (struct pfioc_table *)addr; 2795 2796 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2797 error = ENODEV; 2798 break; 2799 } 2800 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2801 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2802 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2803 break; 2804 } 2805 2806 case DIOCOSFPADD: { 2807 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2808 error = pf_osfp_add(io); 2809 break; 2810 } 2811 2812 case DIOCOSFPGET: { 2813 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2814 error = pf_osfp_get(io); 2815 break; 2816 } 2817 2818 case DIOCXBEGIN: { 2819 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2820 struct pfioc_trans_e *ioe; 2821 struct pfr_table *table; 2822 int i; 2823 2824 if (io->esize != sizeof(*ioe)) { 2825 error = ENODEV; 2826 goto fail; 2827 } 2828 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 2829 M_TEMP, M_WAITOK); 2830 table = (struct pfr_table *)malloc(sizeof(*table), 2831 M_TEMP, M_WAITOK); 2832 for (i = 0; i < io->size; i++) { 2833 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2834 free(table, M_TEMP); 2835 free(ioe, M_TEMP); 2836 error = EFAULT; 2837 goto fail; 2838 } 2839 switch (ioe->rs_num) { 2840 #ifdef ALTQ 2841 case PF_RULESET_ALTQ: 
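/*
 * Sketch (illustrative only): the transaction protocol that
 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement.  Userland
 * declares one pfioc_trans_e per ruleset to replace, begins to obtain
 * tickets, loads the replacement rules against those tickets (e.g.
 * with DIOCADDRULE), then commits so the swap is atomic.  load_rules()
 * is a hypothetical helper.
 *
 *	struct pfioc_trans_e te;
 *	struct pfioc_trans t;
 *
 *	memset(&te, 0, sizeof(te));
 *	memset(&t, 0, sizeof(t));
 *	te.rs_num = PF_RULESET_FILTER;
 *	t.size = 1;
 *	t.esize = sizeof(te);
 *	t.array = &te;
 *	if (ioctl(dev, DIOCXBEGIN, &t) == -1)
 *		err(1, "DIOCXBEGIN");
 *	load_rules(dev, te.ticket);
 *	if (ioctl(dev, DIOCXCOMMIT, &t) == -1) {
 *		(void)ioctl(dev, DIOCXROLLBACK, &t);
 *		err(1, "DIOCXCOMMIT");
 *	}
 */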
2842 if (ioe->anchor[0]) { 2843 free(table, M_TEMP); 2844 free(ioe, M_TEMP); 2845 error = EINVAL; 2846 goto fail; 2847 } 2848 if ((error = pf_begin_altq(&ioe->ticket))) { 2849 free(table, M_TEMP); 2850 free(ioe, M_TEMP); 2851 goto fail; 2852 } 2853 break; 2854 #endif /* ALTQ */ 2855 case PF_RULESET_TABLE: 2856 bzero(table, sizeof(*table)); 2857 strlcpy(table->pfrt_anchor, ioe->anchor, 2858 sizeof(table->pfrt_anchor)); 2859 if ((error = pfr_ina_begin(table, 2860 &ioe->ticket, NULL, 0))) { 2861 free(table, M_TEMP); 2862 free(ioe, M_TEMP); 2863 goto fail; 2864 } 2865 break; 2866 default: 2867 if ((error = pf_begin_rules(&ioe->ticket, 2868 ioe->rs_num, ioe->anchor))) { 2869 free(table, M_TEMP); 2870 free(ioe, M_TEMP); 2871 goto fail; 2872 } 2873 break; 2874 } 2875 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 2876 free(table, M_TEMP); 2877 free(ioe, M_TEMP); 2878 error = EFAULT; 2879 goto fail; 2880 } 2881 } 2882 free(table, M_TEMP); 2883 free(ioe, M_TEMP); 2884 break; 2885 } 2886 2887 case DIOCXROLLBACK: { 2888 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2889 struct pfioc_trans_e *ioe; 2890 struct pfr_table *table; 2891 int i; 2892 2893 if (io->esize != sizeof(*ioe)) { 2894 error = ENODEV; 2895 goto fail; 2896 } 2897 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 2898 M_TEMP, M_WAITOK); 2899 table = (struct pfr_table *)malloc(sizeof(*table), 2900 M_TEMP, M_WAITOK); 2901 for (i = 0; i < io->size; i++) { 2902 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2903 free(table, M_TEMP); 2904 free(ioe, M_TEMP); 2905 error = EFAULT; 2906 goto fail; 2907 } 2908 switch (ioe->rs_num) { 2909 #ifdef ALTQ 2910 case PF_RULESET_ALTQ: 2911 if (ioe->anchor[0]) { 2912 free(table, M_TEMP); 2913 free(ioe, M_TEMP); 2914 error = EINVAL; 2915 goto fail; 2916 } 2917 if ((error = pf_rollback_altq(ioe->ticket))) { 2918 free(table, M_TEMP); 2919 free(ioe, M_TEMP); 2920 goto fail; /* really bad */ 2921 } 2922 break; 2923 #endif /* ALTQ */ 2924 case PF_RULESET_TABLE: 2925 bzero(table, sizeof(*table)); 2926 strlcpy(table->pfrt_anchor, ioe->anchor, 2927 sizeof(table->pfrt_anchor)); 2928 if ((error = pfr_ina_rollback(table, 2929 ioe->ticket, NULL, 0))) { 2930 free(table, M_TEMP); 2931 free(ioe, M_TEMP); 2932 goto fail; /* really bad */ 2933 } 2934 break; 2935 default: 2936 if ((error = pf_rollback_rules(ioe->ticket, 2937 ioe->rs_num, ioe->anchor))) { 2938 free(table, M_TEMP); 2939 free(ioe, M_TEMP); 2940 goto fail; /* really bad */ 2941 } 2942 break; 2943 } 2944 } 2945 free(table, M_TEMP); 2946 free(ioe, M_TEMP); 2947 break; 2948 } 2949 2950 case DIOCXCOMMIT: { 2951 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2952 struct pfioc_trans_e *ioe; 2953 struct pfr_table *table; 2954 struct pf_ruleset *rs; 2955 int i; 2956 2957 if (io->esize != sizeof(*ioe)) { 2958 error = ENODEV; 2959 goto fail; 2960 } 2961 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 2962 M_TEMP, M_WAITOK); 2963 table = (struct pfr_table *)malloc(sizeof(*table), 2964 M_TEMP, M_WAITOK); 2965 /* first makes sure everything will succeed */ 2966 for (i = 0; i < io->size; i++) { 2967 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2968 free(table, M_TEMP); 2969 free(ioe, M_TEMP); 2970 error = EFAULT; 2971 goto fail; 2972 } 2973 switch (ioe->rs_num) { 2974 #ifdef ALTQ 2975 case PF_RULESET_ALTQ: 2976 if (ioe->anchor[0]) { 2977 free(table, M_TEMP); 2978 free(ioe, M_TEMP); 2979 error = EINVAL; 2980 goto fail; 2981 } 2982 if (!altqs_inactive_open || ioe->ticket != 2983 ticket_altqs_inactive) { 2984 free(table, M_TEMP); 2985 free(ioe, M_TEMP); 
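/*
 * Design note: every error path in these three transaction handlers
 * must free both M_TEMP buffers itself before jumping to fail.  A
 * single-exit variant (a sketch only, not what this file does) would
 * centralize that cleanup:
 *
 *	error = 0;
 *	for (i = 0; i < io->size; i++) {
 *		if (copyin(io->array + i, ioe, sizeof(*ioe))) {
 *			error = EFAULT;
 *			break;
 *		}
 *		if ((error = handle_entry(ioe)) != 0)
 *			break;
 *	}
 *	free(table, M_TEMP);
 *	free(ioe, M_TEMP);
 *	if (error)
 *		goto fail;
 *
 * where handle_entry() stands in for the per-rs_num switch.
 */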
2986 error = EBUSY; 2987 goto fail; 2988 } 2989 break; 2990 #endif /* ALTQ */ 2991 case PF_RULESET_TABLE: 2992 rs = pf_find_ruleset(ioe->anchor); 2993 if (rs == NULL || !rs->topen || ioe->ticket != 2994 rs->tticket) { 2995 free(table, M_TEMP); 2996 free(ioe, M_TEMP); 2997 error = EBUSY; 2998 goto fail; 2999 } 3000 break; 3001 default: 3002 if (ioe->rs_num < 0 || ioe->rs_num >= 3003 PF_RULESET_MAX) { 3004 free(table, M_TEMP); 3005 free(ioe, M_TEMP); 3006 error = EINVAL; 3007 goto fail; 3008 } 3009 rs = pf_find_ruleset(ioe->anchor); 3010 if (rs == NULL || 3011 !rs->rules[ioe->rs_num].inactive.open || 3012 rs->rules[ioe->rs_num].inactive.ticket != 3013 ioe->ticket) { 3014 free(table, M_TEMP); 3015 free(ioe, M_TEMP); 3016 error = EBUSY; 3017 goto fail; 3018 } 3019 break; 3020 } 3021 } 3022 /* now do the commit - no errors should happen here */ 3023 for (i = 0; i < io->size; i++) { 3024 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3025 free(table, M_TEMP); 3026 free(ioe, M_TEMP); 3027 error = EFAULT; 3028 goto fail; 3029 } 3030 switch (ioe->rs_num) { 3031 #ifdef ALTQ 3032 case PF_RULESET_ALTQ: 3033 if ((error = pf_commit_altq(ioe->ticket))) { 3034 free(table, M_TEMP); 3035 free(ioe, M_TEMP); 3036 goto fail; /* really bad */ 3037 } 3038 break; 3039 #endif /* ALTQ */ 3040 case PF_RULESET_TABLE: 3041 bzero(table, sizeof(*table)); 3042 strlcpy(table->pfrt_anchor, ioe->anchor, 3043 sizeof(table->pfrt_anchor)); 3044 if ((error = pfr_ina_commit(table, ioe->ticket, 3045 NULL, NULL, 0))) { 3046 free(table, M_TEMP); 3047 free(ioe, M_TEMP); 3048 goto fail; /* really bad */ 3049 } 3050 break; 3051 default: 3052 if ((error = pf_commit_rules(ioe->ticket, 3053 ioe->rs_num, ioe->anchor))) { 3054 free(table, M_TEMP); 3055 free(ioe, M_TEMP); 3056 goto fail; /* really bad */ 3057 } 3058 break; 3059 } 3060 } 3061 free(table, M_TEMP); 3062 free(ioe, M_TEMP); 3063 break; 3064 } 3065 3066 case DIOCGETSRCNODES: { 3067 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3068 struct pf_src_node *n, *p, *pstore; 3069 u_int32_t nr = 0; 3070 int space = psn->psn_len; 3071 3072 if (space == 0) { 3073 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3074 nr++; 3075 psn->psn_len = sizeof(struct pf_src_node) * nr; 3076 break; 3077 } 3078 3079 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 3080 3081 p = psn->psn_src_nodes; 3082 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3083 int secs = time_second, diff; 3084 3085 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3086 break; 3087 3088 bcopy(n, pstore, sizeof(*pstore)); 3089 if (n->rule.ptr != NULL) 3090 pstore->rule.nr = n->rule.ptr->nr; 3091 pstore->creation = secs - pstore->creation; 3092 if (pstore->expire > secs) 3093 pstore->expire -= secs; 3094 else 3095 pstore->expire = 0; 3096 3097 /* adjust the connection rate estimate */ 3098 diff = secs - n->conn_rate.last; 3099 if (diff >= n->conn_rate.seconds) 3100 pstore->conn_rate.count = 0; 3101 else 3102 pstore->conn_rate.count -= 3103 n->conn_rate.count * diff / 3104 n->conn_rate.seconds; 3105 3106 error = copyout(pstore, p, sizeof(*p)); 3107 if (error) { 3108 free(pstore, M_TEMP); 3109 goto fail; 3110 } 3111 p++; 3112 nr++; 3113 } 3114 psn->psn_len = sizeof(struct pf_src_node) * nr; 3115 3116 free(pstore, M_TEMP); 3117 break; 3118 } 3119 3120 case DIOCCLRSRCNODES: { 3121 struct pf_src_node *n; 3122 struct pf_state *state; 3123 3124 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3125 state->src_node = NULL; 3126 state->nat_src_node = NULL; 3127 } 3128 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3129 
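/*
 * Sketch (illustrative only): the usual two-pass calling convention
 * for DIOCGETSRCNODES above.  A first call with psn_len == 0 reports
 * the space needed; the second call fills the buffer.  Since new
 * source nodes can appear between the calls, callers typically
 * over-allocate a little.
 *
 *	struct pfioc_src_nodes psn;
 *	size_t n;
 *
 *	memset(&psn, 0, sizeof(psn));
 *	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)
 *		err(1, "DIOCGETSRCNODES");
 *	psn.psn_src_nodes = malloc(psn.psn_len);
 *	if (psn.psn_src_nodes == NULL)
 *		err(1, "malloc");
 *	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)
 *		err(1, "DIOCGETSRCNODES");
 *	n = psn.psn_len / sizeof(struct pf_src_node);
 */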
n->expire = 1; 3130 n->states = 0; 3131 } 3132 pf_purge_expired_src_nodes(1); 3133 pf_status.src_nodes = 0; 3134 break; 3135 } 3136 3137 case DIOCKILLSRCNODES: { 3138 struct pf_src_node *sn; 3139 struct pf_state *s; 3140 struct pfioc_src_node_kill *psnk = 3141 (struct pfioc_src_node_kill *)addr; 3142 int killed = 0; 3143 3144 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 3145 if (PF_MATCHA(psnk->psnk_src.neg, 3146 &psnk->psnk_src.addr.v.a.addr, 3147 &psnk->psnk_src.addr.v.a.mask, 3148 &sn->addr, sn->af) && 3149 PF_MATCHA(psnk->psnk_dst.neg, 3150 &psnk->psnk_dst.addr.v.a.addr, 3151 &psnk->psnk_dst.addr.v.a.mask, 3152 &sn->raddr, sn->af)) { 3153 /* Handle state to src_node linkage */ 3154 if (sn->states != 0) { 3155 RB_FOREACH(s, pf_state_tree_id, 3156 &tree_id) { 3157 if (s->src_node == sn) 3158 s->src_node = NULL; 3159 if (s->nat_src_node == sn) 3160 s->nat_src_node = NULL; 3161 } 3162 sn->states = 0; 3163 } 3164 sn->expire = 1; 3165 killed++; 3166 } 3167 } 3168 3169 if (killed > 0) 3170 pf_purge_expired_src_nodes(1); 3171 3172 psnk->psnk_af = killed; 3173 break; 3174 } 3175 3176 case DIOCSETHOSTID: { 3177 u_int32_t *hostid = (u_int32_t *)addr; 3178 3179 if (*hostid == 0) 3180 pf_status.hostid = arc4random(); 3181 else 3182 pf_status.hostid = *hostid; 3183 break; 3184 } 3185 3186 case DIOCOSFPFLUSH: 3187 pf_osfp_flush(); 3188 break; 3189 3190 case DIOCIGETIFACES: { 3191 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3192 3193 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 3194 error = ENODEV; 3195 break; 3196 } 3197 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 3198 &io->pfiio_size); 3199 break; 3200 } 3201 3202 case DIOCSETIFFLAG: { 3203 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3204 3205 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3206 break; 3207 } 3208 3209 case DIOCCLRIFFLAG: { 3210 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3211 3212 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3213 break; 3214 } 3215 3216 case DIOCSETLCK: { 3217 pf_state_lock = *(u_int32_t *)addr; 3218 break; 3219 } 3220 3221 default: 3222 error = ENODEV; 3223 break; 3224 } 3225 fail: 3226 splx(s); 3227 if (flags & FWRITE) 3228 rw_exit_write(&pf_consistency_lock); 3229 else 3230 rw_exit_read(&pf_consistency_lock); 3231 return (error); 3232 } 3233 3234 #ifdef __NetBSD__ 3235 #ifdef INET 3236 static int 3237 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir) 3238 { 3239 int error; 3240 3241 /* 3242 * ensure that mbufs are writable beforehand 3243 * as it's assumed by pf code. 3244 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough. 3245 * XXX inefficient 3246 */ 3247 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT); 3248 if (error) { 3249 m_freem(*mp); 3250 *mp = NULL; 3251 return error; 3252 } 3253 3254 /* 3255 * If the packet is out-bound, we can't delay checksums 3256 * here. For in-bound, the checksum has already been 3257 * validated. 3258 */ 3259 if (dir == PFIL_OUT) { 3260 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) { 3261 in_delayed_cksum(*mp); 3262 (*mp)->m_pkthdr.csum_flags &= 3263 ~(M_CSUM_TCPv4|M_CSUM_UDPv4); 3264 } 3265 } 3266 3267 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL) 3268 != PF_PASS) { 3269 m_freem(*mp); 3270 *mp = NULL; 3271 return EHOSTUNREACH; 3272 } 3273 3274 /* 3275 * we're not compatible with fast-forward.
3276 */ 3277 3278 if (dir == PFIL_IN && *mp) { 3279 (*mp)->m_flags &= ~M_CANFASTFWD; 3280 } 3281 3282 return (0); 3283 } 3284 #endif /* INET */ 3285 3286 #ifdef INET6 3287 static int 3288 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir) 3289 { 3290 int error; 3291 3292 /* 3293 * ensure that mbufs are writable beforehand 3294 * as it's assumed by pf code. 3295 * XXX inefficient 3296 */ 3297 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT); 3298 if (error) { 3299 m_freem(*mp); 3300 *mp = NULL; 3301 return error; 3302 } 3303 3304 /* 3305 * If the packet is out-bound, we can't delay checksums 3306 * here. For in-bound, the checksum has already been 3307 * validated. 3308 */ 3309 if (dir == PFIL_OUT) { 3310 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) { 3311 in6_delayed_cksum(*mp); 3312 (*mp)->m_pkthdr.csum_flags &= 3313 ~(M_CSUM_TCPv6|M_CSUM_UDPv6); 3314 } 3315 } 3316 3317 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL) 3318 != PF_PASS) { 3319 m_freem(*mp); 3320 *mp = NULL; 3321 return EHOSTUNREACH; 3322 } else 3323 return (0); 3324 } 3325 #endif /* INET6 */ 3326 3327 static int 3328 pf_pfil_attach(void) 3329 { 3330 struct pfil_head *ph_inet; 3331 #ifdef INET6 3332 struct pfil_head *ph_inet6; 3333 #endif /* INET6 */ 3334 int error; 3335 3336 if (pf_pfil_attached) 3337 return (EBUSY); 3338 3339 ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3340 if (ph_inet) 3341 error = pfil_add_hook((void *)pfil4_wrapper, NULL, 3342 PFIL_IN|PFIL_OUT, ph_inet); 3343 else 3344 error = ENOENT; 3345 if (error) 3346 return (error); 3347 3348 #ifdef INET6 3349 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3350 if (ph_inet6) 3351 error = pfil_add_hook((void *)pfil6_wrapper, NULL, 3352 PFIL_IN|PFIL_OUT, ph_inet6); 3353 else 3354 error = ENOENT; 3355 if (error) 3356 goto bad; 3357 #endif /* INET6 */ 3358 3359 pf_pfil_attached = 1; 3360 3361 return (0); 3362 3363 #ifdef INET6 3364 bad: 3365 pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet); 3366 #endif /* INET6 */ 3367 3368 return (error); 3369 } 3370 3371 static int 3372 pf_pfil_detach(void) 3373 { 3374 struct pfil_head *ph_inet; 3375 #ifdef INET6 3376 struct pfil_head *ph_inet6; 3377 #endif /* INET6 */ 3378 3379 if (pf_pfil_attached == 0) 3380 return (EBUSY); 3381 3382 ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3383 if (ph_inet) 3384 pfil_remove_hook((void *)pfil4_wrapper, NULL, 3385 PFIL_IN|PFIL_OUT, ph_inet); 3386 #ifdef INET6 3387 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3388 if (ph_inet6) 3389 pfil_remove_hook((void *)pfil6_wrapper, NULL, 3390 PFIL_IN|PFIL_OUT, ph_inet6); 3391 #endif /* INET6 */ 3392 pf_pfil_attached = 0; 3393 3394 return (0); 3395 } 3396 #endif /* __NetBSD__ */ 3397 3398 #if defined(__NetBSD__) 3399 MODULE(MODULE_CLASS_DRIVER, pf, "bpf"); 3400 3401 static int 3402 pf_modcmd(modcmd_t cmd, void *opaque) 3403 { 3404 #ifdef _MODULE 3405 extern void pflogattach(int); 3406 extern void pflogdetach(void); 3407 3408 devmajor_t cmajor = NODEVMAJOR, bmajor = NODEVMAJOR; 3409 int err; 3410 3411 switch (cmd) { 3412 case MODULE_CMD_INIT: 3413 err = devsw_attach("pf", NULL, &bmajor, &pf_cdevsw, &cmajor); 3414 if (err) 3415 return err; 3416 pfattach(1); 3417 pflogattach(1); 3418 return 0; 3419 case MODULE_CMD_FINI: 3420 if (pf_status.running) { 3421 return EBUSY; 3422 } else { 3423 pfdetach(); 3424 pflogdetach(); 3425 return devsw_detach(NULL, &pf_cdevsw); 3426 } 3427 default: 3428 return ENOTTY; 3429 } 3430 #else 3431 if (cmd == MODULE_CMD_INIT) 3432 return 0; 3433 
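/*
 * Sketch (illustrative only; the modctl(2) details are an assumption,
 * check the manual page): loading and unloading this module from
 * userland.  MODULE_CMD_FINI above refuses with EBUSY while pf is
 * enabled, so the filter must be disabled first (pfctl -d, i.e.
 * DIOCSTOP).
 *
 *	#include <sys/module.h>
 *
 *	modctl_load_t ml = { "pf", 0, NULL, 0 };
 *
 *	if (modctl(MODCTL_LOAD, &ml) == -1)
 *		err(1, "MODCTL_LOAD");
 *	...
 *	if (modctl(MODCTL_UNLOAD, "pf") == -1)
 *		err(1, "MODCTL_UNLOAD");	(EBUSY while pf is running)
 */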
return ENOTTY; 3434 #endif 3435 } 3436 #endif 3437
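/*
 * Sketch (illustrative only, not part of the driver): a minimal
 * userland client for the ioctl interface implemented in this file.
 * It opens /dev/pf read-only and prints a few pf_status fields via
 * DIOCGETSTATUS; additional includes may be needed depending on
 * net/pfvar.h's prerequisites.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <net/pfvar.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct pf_status st;
 *		int dev;
 *
 *		if ((dev = open("/dev/pf", O_RDONLY)) == -1)
 *			err(1, "open(/dev/pf)");
 *		if (ioctl(dev, DIOCGETSTATUS, &st) == -1)
 *			err(1, "DIOCGETSTATUS");
 *		printf("running %u, states %u, since %u\n",
 *		    st.running, st.states, st.since);
 *		close(dev);
 *		return 0;
 *	}
 */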