/*	$NetBSD: pf_ioctl.c,v 1.48 2013/07/01 08:32:48 skrll Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.48 2013/07/01 08:32:48 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include "pfsync.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/kauth.h>
#include <sys/module.h>
#include <sys/cprng.h>
#endif /* __NetBSD__ */

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifndef __NetBSD__
#include <dev/rndvar.h>
#include <crypto/md5.h>
#else
#include <sys/md5.h>
#endif /* __NetBSD__ */
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif /* NPFLOG > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

void			 pfattach(int);
#ifdef _MODULE
void			 pfdetach(void);
#endif /* _MODULE */
#ifndef __NetBSD__
void			 pf_thread_create(void *);
#endif /* !__NetBSD__ */
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, int, char *);
void			 pf_state_export(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);
void			 pf_state_import(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);

static int		 pf_state_add(struct pfsync_state *);

struct pf_rule		 pf_default_rule;
#ifdef __NetBSD__
krwlock_t		 pf_consistency_lock;
#else
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#endif /* __NetBSD__ */
#ifdef ALTQ
static int		 pf_altq_running;
#endif

int			 pf_state_lock = 0;

#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};

static int pfil4_wrapper(void *, struct mbuf **, struct ifnet *, int);
#ifdef INET6
static int pfil6_wrapper(void *, struct mbuf **, struct ifnet *, int);
#endif /* INET6 */

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached;

static kauth_listener_t pf_listener;
#endif /* __NetBSD__ */

#ifdef __NetBSD__
static int
pf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)arg0;

	if (action != KAUTH_NETWORK_FIREWALL)
		return result;

	/* These must have come from device context. */
	if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
	    (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
		result = KAUTH_RESULT_ALLOW;

	return result;
}
#endif /* __NetBSD__ */

void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

#ifdef __NetBSD__
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL, IPL_SOFTNET);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL, IPL_SOFTNET);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL, IPL_SOFTNET);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */

	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

#ifdef __NetBSD__
	rw_init(&pf_consistency_lock);
#endif /* __NetBSD__ */
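
	/*
	 * Note on the sentinel below: pf_rm_rule() only frees a rule whose
	 * entries.tqe_prev is NULL, i.e. one that is not on any queue.
	 * Pointing tqe_prev at the rule's own tqe_next makes the default
	 * rule look permanently queued, so it can never be reclaimed.
	 */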
	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = cprng_fast32();

	/* require process context to purge states, so perform in a thread */
#ifdef __NetBSD__
	if (kthread_create(PRI_NONE, 0, NULL, pf_purge_thread, NULL, NULL,
	    "pfpurge"))
		panic("pfpurge thread");
#else
	kthread_create_deferred(pf_thread_create, NULL);
#endif /* !__NetBSD__ */

#ifdef __NetBSD__
	pf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    pf_listener_cb, NULL);
#endif /* __NetBSD__ */
}

#ifdef _MODULE
void
pfdetach(void)
{
	extern int		 pf_purge_thread_running;
	extern int		 pf_purge_thread_stop;
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	 pt;
	u_int32_t		 ticket;
	int			 i;
	char			 r = '\0';

	pf_purge_thread_stop = 1;
	wakeup(pf_purge_thread);

	/* wait until the kthread exits */
	while (pf_purge_thread_running)
		tsleep(&pf_purge_thread_running, PWAIT, "pfdown", 0);

	(void)pf_pfil_detach();

	pf_status.running = 0;

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* ALTQ */

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC > 0
		state->sync_flags = PFSTATE_NOSYNC;
#endif /* NPFSYNC > 0 */
	}
	pf_purge_expired_states(pf_status.states);
#if NPFSYNC > 0
	pfsync_clear_states(pf_status.hostid, NULL);
#endif /* NPFSYNC > 0 */

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes(0);

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_key_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	rw_destroy(&pf_consistency_lock);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();

	/* cleanup kauth listener */
	kauth_unlisten_scope(pf_listener);
}
#endif /* _MODULE */

#ifndef __NetBSD__
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
#endif /* !__NetBSD__ */

int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
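
/*
 * Tag names are mapped to small integer tags (1..TAGID_MAX) below.
 * Allocation scans the sorted list for the first gap, so with tags
 * {1, 2, 4} in use a new name gets tag 3; only when the sequence is
 * dense does a new entry go at the tail.  This keeps the tag space
 * compact even after rules referencing tags have been deleted.
 */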
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif /* !__NetBSD__ */
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif /* !__NetBSD__ */
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif /* !__NetBSD__ */
}

#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}

int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}

int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced by the interface,
	 * it was overridden by a new one; in that case, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */
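
/*
 * Ruleset changes follow a begin/commit protocol: pf_begin_rules()
 * empties the inactive list and hands out a ticket, DIOCADDRULE calls
 * stage rules on the inactive list under that ticket, and
 * pf_commit_rules() swaps the inactive and active lists inside one
 * splsoftnet section, so lookups never see a half-loaded ruleset.
 * A stale ticket aborts the transaction with EBUSY.
 */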

int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
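
/*
 * The PF_MD5_* helpers fold the loaded rules into an MD5 digest that
 * ends up in pf_status.pf_chksum (see pf_setup_pfsync_matching()
 * below).  Presumably pfsync peers compare this checksum to verify
 * that both ends of a failover pair run identical rulesets; fields
 * are hashed in network byte order so the digest is stable across
 * hosts.
 */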

#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
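
/*
 * pf_state_export()/pf_state_import() translate between the kernel's
 * pf_state/pf_state_key pair and the flat pfsync_state wire format.
 * Times are made host-independent: creation is exported as age in
 * seconds and expire as seconds remaining, so a peer can reconstruct
 * sensible absolute times on import regardless of clock skew.
 */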

void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	int secs = time_second;

	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;
}

void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->rule.ptr->states++;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = time_second;
	s->expire = time_second;
	s->timeout = sp->timeout;
	if (sp->expire > 0)
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}

int
pf_state_add(struct pfsync_state *sp)
{
	struct pf_state		*s;
	struct pf_state_key	*sk;
	struct pfi_kif		*kif;

	if (sp->timeout >= PFTM_MAX &&
	    sp->timeout != PFTM_UNTIL_PACKET) {
		return EINVAL;
	}
	s = pool_get(&pf_state_pl, PR_NOWAIT);
	if (s == NULL) {
		return ENOMEM;
	}
	bzero(s, sizeof(struct pf_state));
	if ((sk = pf_alloc_state_key(s)) == NULL) {
		pool_put(&pf_state_pl, s);
		return ENOMEM;
	}
	pf_state_import(sp, sk, s);
	kif = pfi_kif_get(sp->ifname);
	if (kif == NULL) {
		pool_put(&pf_state_pl, s);
		pool_put(&pf_state_key_pl, sk);
		return ENOENT;
	}
	if (pf_insert_state(kif, s)) {
		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
		pool_put(&pf_state_pl, s);
		return ENOMEM;
	}

	return 0;
}
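
/*
 * pf_setup_pfsync_matching() hashes every staged ruleset into the
 * digest and also builds inactive.ptr_array, an array indexed by
 * rule->nr.  This relies on rule numbers being dense (0..rcount-1),
 * which DIOCADDRULE guarantees by numbering each staged rule after
 * the current tail.
 */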
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(void *) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}
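
/*
 * pfioctl() vets every command twice before doing any work: first
 * against kauth (callers without KAUTH_REQ_NETWORK_FIREWALL_FW
 * privilege are limited to the listed query commands), then against
 * the open mode (a descriptor opened without FWRITE may only issue
 * read-only commands).  Table commands carrying PFR_FLAG_DUMMY are
 * let through as no-op dry runs.  Writers then take
 * pf_consistency_lock exclusively; readers share it.
 */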
int
pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
		case DIOCSETLCK:
		case DIOCADDSTATES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETLCK:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
		case DIOCADDSTATES:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		rw_enter_write(&pf_consistency_lock);
	else
		rw_enter_read(&pf_consistency_lock);

	s = splsoftnet();
	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __NetBSD__
			error = pf_pfil_attach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 1;
			pf_status.since = time_second;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time_second;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
#ifdef __NetBSD__
			error = pf_pfil_detach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 0;
			pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;

	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
#ifdef __NetBSD__
		rule->cuid = kauth_cred_getuid(l->l_cred);
		rule->cpid = l->l_proc->p_pid;
#else
		rule->cuid = p->p_cred->p_ruid;
		rule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

#ifndef __NetBSD__
		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
			error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
#if NPFLOG > 0
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
#endif
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		rule->overload_tbl = NULL;
		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}
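
	/*
	 * Rule listing is a two-step protocol: DIOCGETRULES returns the
	 * active ticket and the rule count in pr->nr, then userland
	 * fetches each rule by number with DIOCGETRULE under that same
	 * ticket.  A ruleset swap in between invalidates the ticket and
	 * the iteration fails with EBUSY rather than returning a mix of
	 * old and new rules.
	 */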

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}
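
	/*
	 * DIOCCHANGERULE edits the *active* ruleset in place, bypassing
	 * the begin/commit transaction: the caller first obtains a
	 * ticket with PF_CHANGE_GET_TICKET, then adds, moves or removes
	 * a single rule under that ticket.  Rules are renumbered and the
	 * skip steps recalculated after every change.
	 */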

	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
#ifdef __NetBSD__
			newrule->cuid = kauth_cred_getuid(l->l_cred);
			newrule->cpid = l->l_proc->p_pid;
#else
			newrule->cuid = p->p_cred->p_ruid;
			newrule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

#ifndef __NetBSD__
			if (newrule->rtableid > 0 &&
			    !rtable_exists(newrule->rtableid))
				error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if NPFLOG > 0
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}

	case DIOCCLRSTATES: {
		struct pf_state		*ps, *nexts;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (ps = RB_MIN(pf_state_tree_id, &tree_id); ps; ps = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, ps);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    ps->kif->pfik_name)) {
#if NPFSYNC
				/* don't send out individual delete messages */
				ps->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(ps);
				killed++;
			}
		}
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*ps, *nexts;
		struct pf_state_key	*sk;
		struct pf_state_host	*src, *dst;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (ps = RB_MIN(pf_state_tree_id, &tree_id); ps;
		    ps = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, ps);
			sk = ps->state_key;

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    src->port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dst->port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    ps->kif->pfik_name))) {
#if NPFSYNC > 0
				/* send immediate delete of state */
				pfsync_delete_state(ps);
				ps->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(ps);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = (struct pfsync_state *)ps->state;

		error = pf_state_add(sp);
		break;
	}

	case DIOCADDSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pfsync_state	*p = (struct pfsync_state *)ps->ps_states;
		struct pfsync_state	*pk;
		int			 size = ps->ps_len;
		int			 i = 0;

		error = 0;
		pk = malloc(sizeof(*pk), M_TEMP, M_WAITOK);

		while (error == 0 && i < size) {
			if (copyin(p, pk, sizeof(struct pfsync_state)))
				/* freed once below, after the loop */
				error = EFAULT;
			else {
				error = pf_state_add(pk);
				i += sizeof(*p);
				p++;
			}
		}

		free(pk, M_TEMP);
		break;
	}
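
	/*
	 * DIOCGETSTATES supports the usual two-call sizing idiom: with
	 * ps_len == 0 the kernel only reports the space required; with a
	 * buffer supplied it exports as many states as fit, skipping
	 * states already unlinked (PFTM_UNLINKED), and returns the size
	 * actually used in ps_len.
	 */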

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*pfs;
		u_int32_t		 nr;

		nr = 0;
		RB_FOREACH(pfs, pf_state_tree_id, &tree_id) {
			if (nr >= ps->nr)
				break;
			nr++;
		}
		if (pfs == NULL) {
			error = EBUSY;
			break;
		}

		pf_state_export((struct pfsync_state *)&ps->state,
		    pfs->state_key, pfs);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP);
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP);
		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *ps = (struct pf_status *)addr;

		bcopy(&pf_status, ps, sizeof(struct pf_status));
		pfi_fill_oldstatus(ps);
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		if (ifunit(pi->ifname) == NULL) {
			error = EINVAL;
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = time_second;
		if (*pf_status.ifname)
			pfi_clr_istats(pf_status.ifname);
		break;
	}
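
	/*
	 * DIOCNATLOOK lets userland recover what NAT did to a
	 * connection: given the addresses and ports of one side, it
	 * returns the translated pair in rsaddr/rsport and
	 * rdaddr/rdport.  Transparent proxies typically use this to
	 * learn the original destination of a redirected connection.
	 */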
2062 */ 2063 if (direction == PF_IN) { 2064 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 2065 key.ext.port = pnl->dport; 2066 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 2067 key.gwy.port = pnl->sport; 2068 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 2069 } else { 2070 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 2071 key.lan.port = pnl->dport; 2072 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 2073 key.ext.port = pnl->sport; 2074 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 2075 } 2076 if (m > 1) 2077 error = E2BIG; /* more than one state */ 2078 else if (state != NULL) { 2079 sk = state->state_key; 2080 if (direction == PF_IN) { 2081 PF_ACPY(&pnl->rsaddr, &sk->lan.addr, 2082 sk->af); 2083 pnl->rsport = sk->lan.port; 2084 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 2085 pnl->af); 2086 pnl->rdport = pnl->dport; 2087 } else { 2088 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr, 2089 sk->af); 2090 pnl->rdport = sk->gwy.port; 2091 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 2092 pnl->af); 2093 pnl->rsport = pnl->sport; 2094 } 2095 } else 2096 error = ENOENT; 2097 } 2098 break; 2099 } 2100 2101 case DIOCSETTIMEOUT: { 2102 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2103 int old; 2104 2105 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2106 pt->seconds < 0) { 2107 error = EINVAL; 2108 goto fail; 2109 } 2110 old = pf_default_rule.timeout[pt->timeout]; 2111 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 2112 pt->seconds = 1; 2113 pf_default_rule.timeout[pt->timeout] = pt->seconds; 2114 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 2115 wakeup(pf_purge_thread); 2116 pt->seconds = old; 2117 break; 2118 } 2119 2120 case DIOCGETTIMEOUT: { 2121 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2122 2123 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2124 error = EINVAL; 2125 goto fail; 2126 } 2127 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2128 break; 2129 } 2130 2131 case DIOCGETLIMIT: { 2132 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2133 2134 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2135 error = EINVAL; 2136 goto fail; 2137 } 2138 pl->limit = pf_pool_limits[pl->index].limit; 2139 break; 2140 } 2141 2142 case DIOCSETLIMIT: { 2143 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2144 int old_limit; 2145 2146 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 2147 pf_pool_limits[pl->index].pp == NULL) { 2148 error = EINVAL; 2149 goto fail; 2150 } 2151 #ifdef __NetBSD__ 2152 pool_sethardlimit(pf_pool_limits[pl->index].pp, 2153 pl->limit, NULL, 0); 2154 #else 2155 if (pool_sethardlimit(pf_pool_limits[pl->index].pp, 2156 pl->limit, NULL, 0) != 0) { 2157 error = EBUSY; 2158 goto fail; 2159 } 2160 #endif /* !__NetBSD__ */ 2161 old_limit = pf_pool_limits[pl->index].limit; 2162 pf_pool_limits[pl->index].limit = pl->limit; 2163 pl->limit = old_limit; 2164 break; 2165 } 2166 2167 case DIOCSETDEBUG: { 2168 u_int32_t *level = (u_int32_t *)addr; 2169 2170 pf_status.debug = *level; 2171 break; 2172 } 2173 2174 case DIOCCLRRULECTRS: { 2175 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 2176 struct pf_ruleset *ruleset = &pf_main_ruleset; 2177 struct pf_rule *rule; 2178 2179 TAILQ_FOREACH(rule, 2180 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 2181 rule->evaluations = 0; 2182 rule->packets[0] = rule->packets[1] = 0; 2183 rule->bytes[0] = rule->bytes[1] = 0; 2184 } 2185 break; 2186 } 2187 2188 #ifdef ALTQ 2189 case DIOCSTARTALTQ: { 2190 struct pf_altq *altq; 2191 2192 /* enable all altq interfaces on active list */ 2193 TAILQ_FOREACH(altq, pf_altqs_active, 
entries) { 2194 if (altq->qname[0] == 0) { 2195 error = pf_enable_altq(altq); 2196 if (error != 0) 2197 break; 2198 } 2199 } 2200 if (error == 0) 2201 pf_altq_running = 1; 2202 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2203 break; 2204 } 2205 2206 case DIOCSTOPALTQ: { 2207 struct pf_altq *altq; 2208 2209 /* disable all altq interfaces on active list */ 2210 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2211 if (altq->qname[0] == 0) { 2212 error = pf_disable_altq(altq); 2213 if (error != 0) 2214 break; 2215 } 2216 } 2217 if (error == 0) 2218 pf_altq_running = 0; 2219 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2220 break; 2221 } 2222 2223 case DIOCADDALTQ: { 2224 struct pfioc_altq *paa = (struct pfioc_altq *)addr; 2225 struct pf_altq *altq, *a; 2226 2227 if (paa->ticket != ticket_altqs_inactive) { 2228 error = EBUSY; 2229 break; 2230 } 2231 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2232 if (altq == NULL) { 2233 error = ENOMEM; 2234 break; 2235 } 2236 bcopy(&paa->altq, altq, sizeof(struct pf_altq)); 2237 2238 /* 2239 * if this is for a queue, find the discipline and 2240 * copy the necessary fields 2241 */ 2242 if (altq->qname[0] != 0) { 2243 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2244 error = EBUSY; 2245 pool_put(&pf_altq_pl, altq); 2246 break; 2247 } 2248 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2249 if (strncmp(a->ifname, altq->ifname, 2250 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2251 altq->altq_disc = a->altq_disc; 2252 break; 2253 } 2254 } 2255 } 2256 2257 error = altq_add(altq); 2258 if (error) { 2259 pool_put(&pf_altq_pl, altq); 2260 break; 2261 } 2262 2263 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2264 bcopy(altq, &paa->altq, sizeof(struct pf_altq)); 2265 break; 2266 } 2267 2268 case DIOCGETALTQS: { 2269 struct pfioc_altq *paa = (struct pfioc_altq *)addr; 2270 struct pf_altq *altq; 2271 2272 paa->nr = 0; 2273 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2274 paa->nr++; 2275 paa->ticket = ticket_altqs_active; 2276 break; 2277 } 2278 2279 case DIOCGETALTQ: { 2280 struct pfioc_altq *paa = (struct pfioc_altq *)addr; 2281 struct pf_altq *altq; 2282 u_int32_t nr; 2283 2284 if (paa->ticket != ticket_altqs_active) { 2285 error = EBUSY; 2286 break; 2287 } 2288 nr = 0; 2289 altq = TAILQ_FIRST(pf_altqs_active); 2290 while ((altq != NULL) && (nr < paa->nr)) { 2291 altq = TAILQ_NEXT(altq, entries); 2292 nr++; 2293 } 2294 if (altq == NULL) { 2295 error = EBUSY; 2296 break; 2297 } 2298 bcopy(altq, &paa->altq, sizeof(struct pf_altq)); 2299 break; 2300 } 2301 2302 case DIOCCHANGEALTQ: 2303 /* CHANGEALTQ not supported yet! 
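	 * queues cannot be modified in place; queue definitions are
	 * replaced wholesale via a DIOCXBEGIN/DIOCXCOMMIT transaction
	 * on PF_RULESET_ALTQ instead.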
*/ 2304 error = ENODEV; 2305 break; 2306 2307 case DIOCGETQSTATS: { 2308 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2309 struct pf_altq *altq; 2310 u_int32_t nr; 2311 int nbytes; 2312 2313 if (pq->ticket != ticket_altqs_active) { 2314 error = EBUSY; 2315 break; 2316 } 2317 nbytes = pq->nbytes; 2318 nr = 0; 2319 altq = TAILQ_FIRST(pf_altqs_active); 2320 while ((altq != NULL) && (nr < pq->nr)) { 2321 altq = TAILQ_NEXT(altq, entries); 2322 nr++; 2323 } 2324 if (altq == NULL) { 2325 error = EBUSY; 2326 break; 2327 } 2328 error = altq_getqstats(altq, pq->buf, &nbytes); 2329 if (error == 0) { 2330 pq->scheduler = altq->scheduler; 2331 pq->nbytes = nbytes; 2332 } 2333 break; 2334 } 2335 #endif /* ALTQ */ 2336 2337 case DIOCBEGINADDRS: { 2338 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2339 2340 pf_empty_pool(&pf_pabuf); 2341 pp->ticket = ++ticket_pabuf; 2342 break; 2343 } 2344 2345 case DIOCADDADDR: { 2346 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2347 2348 if (pp->ticket != ticket_pabuf) { 2349 error = EBUSY; 2350 break; 2351 } 2352 #ifndef INET 2353 if (pp->af == AF_INET) { 2354 error = EAFNOSUPPORT; 2355 break; 2356 } 2357 #endif /* INET */ 2358 #ifndef INET6 2359 if (pp->af == AF_INET6) { 2360 error = EAFNOSUPPORT; 2361 break; 2362 } 2363 #endif /* INET6 */ 2364 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2365 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2366 pp->addr.addr.type != PF_ADDR_TABLE) { 2367 error = EINVAL; 2368 break; 2369 } 2370 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2371 if (pa == NULL) { 2372 error = ENOMEM; 2373 break; 2374 } 2375 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2376 if (pa->ifname[0]) { 2377 pa->kif = pfi_kif_get(pa->ifname); 2378 if (pa->kif == NULL) { 2379 pool_put(&pf_pooladdr_pl, pa); 2380 error = EINVAL; 2381 break; 2382 } 2383 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); 2384 } 2385 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2386 pfi_dynaddr_remove(&pa->addr); 2387 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); 2388 pool_put(&pf_pooladdr_pl, pa); 2389 error = EINVAL; 2390 break; 2391 } 2392 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2393 break; 2394 } 2395 2396 case DIOCGETADDRS: { 2397 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2398 2399 pp->nr = 0; 2400 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2401 pp->r_num, 0, 1, 0); 2402 if (pool == NULL) { 2403 error = EBUSY; 2404 break; 2405 } 2406 TAILQ_FOREACH(pa, &pool->list, entries) 2407 pp->nr++; 2408 break; 2409 } 2410 2411 case DIOCGETADDR: { 2412 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2413 u_int32_t nr = 0; 2414 2415 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2416 pp->r_num, 0, 1, 1); 2417 if (pool == NULL) { 2418 error = EBUSY; 2419 break; 2420 } 2421 pa = TAILQ_FIRST(&pool->list); 2422 while ((pa != NULL) && (nr < pp->nr)) { 2423 pa = TAILQ_NEXT(pa, entries); 2424 nr++; 2425 } 2426 if (pa == NULL) { 2427 error = EBUSY; 2428 break; 2429 } 2430 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2431 pfi_dynaddr_copyout(&pp->addr.addr); 2432 pf_tbladdr_copyout(&pp->addr.addr); 2433 pf_rtlabel_copyout(&pp->addr.addr); 2434 break; 2435 } 2436 2437 case DIOCCHANGEADDR: { 2438 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2439 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2440 struct pf_ruleset *ruleset; 2441 2442 if (pca->action < PF_CHANGE_ADD_HEAD || 2443 pca->action > PF_CHANGE_REMOVE) { 2444 error = EINVAL; 2445 break; 2446 } 2447 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2448 
pca->addr.addr.type != PF_ADDR_DYNIFTL && 2449 pca->addr.addr.type != PF_ADDR_TABLE) { 2450 error = EINVAL; 2451 break; 2452 } 2453 2454 ruleset = pf_find_ruleset(pca->anchor); 2455 if (ruleset == NULL) { 2456 error = EBUSY; 2457 break; 2458 } 2459 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2460 pca->r_num, pca->r_last, 1, 1); 2461 if (pool == NULL) { 2462 error = EBUSY; 2463 break; 2464 } 2465 if (pca->action != PF_CHANGE_REMOVE) { 2466 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2467 if (newpa == NULL) { 2468 error = ENOMEM; 2469 break; 2470 } 2471 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2472 #ifndef INET 2473 if (pca->af == AF_INET) { 2474 pool_put(&pf_pooladdr_pl, newpa); 2475 error = EAFNOSUPPORT; 2476 break; 2477 } 2478 #endif /* INET */ 2479 #ifndef INET6 2480 if (pca->af == AF_INET6) { 2481 pool_put(&pf_pooladdr_pl, newpa); 2482 error = EAFNOSUPPORT; 2483 break; 2484 } 2485 #endif /* INET6 */ 2486 if (newpa->ifname[0]) { 2487 newpa->kif = pfi_kif_get(newpa->ifname); 2488 if (newpa->kif == NULL) { 2489 pool_put(&pf_pooladdr_pl, newpa); 2490 error = EINVAL; 2491 break; 2492 } 2493 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); 2494 } else 2495 newpa->kif = NULL; 2496 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2497 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2498 pfi_dynaddr_remove(&newpa->addr); 2499 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); 2500 pool_put(&pf_pooladdr_pl, newpa); 2501 error = EINVAL; 2502 break; 2503 } 2504 } 2505 2506 if (pca->action == PF_CHANGE_ADD_HEAD) 2507 oldpa = TAILQ_FIRST(&pool->list); 2508 else if (pca->action == PF_CHANGE_ADD_TAIL) 2509 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2510 else { 2511 int i = 0; 2512 2513 oldpa = TAILQ_FIRST(&pool->list); 2514 while ((oldpa != NULL) && (i < pca->nr)) { 2515 oldpa = TAILQ_NEXT(oldpa, entries); 2516 i++; 2517 } 2518 if (oldpa == NULL) { 2519 error = EINVAL; 2520 break; 2521 } 2522 } 2523 2524 if (pca->action == PF_CHANGE_REMOVE) { 2525 TAILQ_REMOVE(&pool->list, oldpa, entries); 2526 pfi_dynaddr_remove(&oldpa->addr); 2527 pf_tbladdr_remove(&oldpa->addr); 2528 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); 2529 pool_put(&pf_pooladdr_pl, oldpa); 2530 } else { 2531 if (oldpa == NULL) 2532 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2533 else if (pca->action == PF_CHANGE_ADD_HEAD || 2534 pca->action == PF_CHANGE_ADD_BEFORE) 2535 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2536 else 2537 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2538 newpa, entries); 2539 } 2540 2541 pool->cur = TAILQ_FIRST(&pool->list); 2542 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2543 pca->af); 2544 break; 2545 } 2546 2547 case DIOCGETRULESETS: { 2548 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2549 struct pf_ruleset *ruleset; 2550 struct pf_anchor *anchor; 2551 2552 pr->path[sizeof(pr->path) - 1] = 0; 2553 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2554 error = EINVAL; 2555 break; 2556 } 2557 pr->nr = 0; 2558 if (ruleset->anchor == NULL) { 2559 /* XXX kludge for pf_main_ruleset */ 2560 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2561 if (anchor->parent == NULL) 2562 pr->nr++; 2563 } else { 2564 RB_FOREACH(anchor, pf_anchor_node, 2565 &ruleset->anchor->children) 2566 pr->nr++; 2567 } 2568 break; 2569 } 2570 2571 case DIOCGETRULESET: { 2572 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2573 struct pf_ruleset *ruleset; 2574 struct pf_anchor *anchor; 2575 u_int32_t nr = 0; 2576 2577 pr->path[sizeof(pr->path) - 1] = 0; 2578 if ((ruleset = pf_find_ruleset(pr->path)) == 
NULL) { 2579 error = EINVAL; 2580 break; 2581 } 2582 pr->name[0] = 0; 2583 if (ruleset->anchor == NULL) { 2584 /* XXX kludge for pf_main_ruleset */ 2585 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2586 if (anchor->parent == NULL && nr++ == pr->nr) { 2587 strlcpy(pr->name, anchor->name, 2588 sizeof(pr->name)); 2589 break; 2590 } 2591 } else { 2592 RB_FOREACH(anchor, pf_anchor_node, 2593 &ruleset->anchor->children) 2594 if (nr++ == pr->nr) { 2595 strlcpy(pr->name, anchor->name, 2596 sizeof(pr->name)); 2597 break; 2598 } 2599 } 2600 if (!pr->name[0]) 2601 error = EBUSY; 2602 break; 2603 } 2604 2605 case DIOCRCLRTABLES: { 2606 struct pfioc_table *io = (struct pfioc_table *)addr; 2607 2608 if (io->pfrio_esize != 0) { 2609 error = ENODEV; 2610 break; 2611 } 2612 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2613 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2614 break; 2615 } 2616 2617 case DIOCRADDTABLES: { 2618 struct pfioc_table *io = (struct pfioc_table *)addr; 2619 2620 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2621 error = ENODEV; 2622 break; 2623 } 2624 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2625 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2626 break; 2627 } 2628 2629 case DIOCRDELTABLES: { 2630 struct pfioc_table *io = (struct pfioc_table *)addr; 2631 2632 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2633 error = ENODEV; 2634 break; 2635 } 2636 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2637 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2638 break; 2639 } 2640 2641 case DIOCRGETTABLES: { 2642 struct pfioc_table *io = (struct pfioc_table *)addr; 2643 2644 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2645 error = ENODEV; 2646 break; 2647 } 2648 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2649 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2650 break; 2651 } 2652 2653 case DIOCRGETTSTATS: { 2654 struct pfioc_table *io = (struct pfioc_table *)addr; 2655 2656 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2657 error = ENODEV; 2658 break; 2659 } 2660 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2661 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2662 break; 2663 } 2664 2665 case DIOCRCLRTSTATS: { 2666 struct pfioc_table *io = (struct pfioc_table *)addr; 2667 2668 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2669 error = ENODEV; 2670 break; 2671 } 2672 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2673 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2674 break; 2675 } 2676 2677 case DIOCRSETTFLAGS: { 2678 struct pfioc_table *io = (struct pfioc_table *)addr; 2679 2680 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2681 error = ENODEV; 2682 break; 2683 } 2684 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2685 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2686 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2687 break; 2688 } 2689 2690 case DIOCRCLRADDRS: { 2691 struct pfioc_table *io = (struct pfioc_table *)addr; 2692 2693 if (io->pfrio_esize != 0) { 2694 error = ENODEV; 2695 break; 2696 } 2697 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2698 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2699 break; 2700 } 2701 2702 case DIOCRADDADDRS: { 2703 struct pfioc_table *io = (struct pfioc_table *)addr; 2704 2705 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2706 error = ENODEV; 2707 break; 2708 } 2709 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2710 io->pfrio_size, &io->pfrio_nadd, 
io->pfrio_flags | 2711 PFR_FLAG_USERIOCTL); 2712 break; 2713 } 2714 2715 case DIOCRDELADDRS: { 2716 struct pfioc_table *io = (struct pfioc_table *)addr; 2717 2718 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2719 error = ENODEV; 2720 break; 2721 } 2722 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2723 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2724 PFR_FLAG_USERIOCTL); 2725 break; 2726 } 2727 2728 case DIOCRSETADDRS: { 2729 struct pfioc_table *io = (struct pfioc_table *)addr; 2730 2731 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2732 error = ENODEV; 2733 break; 2734 } 2735 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2736 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2737 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2738 PFR_FLAG_USERIOCTL, 0); 2739 break; 2740 } 2741 2742 case DIOCRGETADDRS: { 2743 struct pfioc_table *io = (struct pfioc_table *)addr; 2744 2745 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2746 error = ENODEV; 2747 break; 2748 } 2749 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2750 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2751 break; 2752 } 2753 2754 case DIOCRGETASTATS: { 2755 struct pfioc_table *io = (struct pfioc_table *)addr; 2756 2757 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2758 error = ENODEV; 2759 break; 2760 } 2761 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2762 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2763 break; 2764 } 2765 2766 case DIOCRCLRASTATS: { 2767 struct pfioc_table *io = (struct pfioc_table *)addr; 2768 2769 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2770 error = ENODEV; 2771 break; 2772 } 2773 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2774 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2775 PFR_FLAG_USERIOCTL); 2776 break; 2777 } 2778 2779 case DIOCRTSTADDRS: { 2780 struct pfioc_table *io = (struct pfioc_table *)addr; 2781 2782 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2783 error = ENODEV; 2784 break; 2785 } 2786 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2787 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2788 PFR_FLAG_USERIOCTL); 2789 break; 2790 } 2791 2792 case DIOCRINADEFINE: { 2793 struct pfioc_table *io = (struct pfioc_table *)addr; 2794 2795 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2796 error = ENODEV; 2797 break; 2798 } 2799 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2800 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2801 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2802 break; 2803 } 2804 2805 case DIOCOSFPADD: { 2806 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2807 error = pf_osfp_add(io); 2808 break; 2809 } 2810 2811 case DIOCOSFPGET: { 2812 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2813 error = pf_osfp_get(io); 2814 break; 2815 } 2816 2817 case DIOCXBEGIN: { 2818 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2819 struct pfioc_trans_e *ioe; 2820 struct pfr_table *table; 2821 int i; 2822 2823 if (io->esize != sizeof(*ioe)) { 2824 error = ENODEV; 2825 goto fail; 2826 } 2827 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 2828 M_TEMP, M_WAITOK); 2829 table = (struct pfr_table *)malloc(sizeof(*table), 2830 M_TEMP, M_WAITOK); 2831 for (i = 0; i < io->size; i++) { 2832 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2833 free(table, M_TEMP); 2834 free(ioe, M_TEMP); 2835 error = EFAULT; 2836 goto fail; 2837 } 2838 switch (ioe->rs_num) { 2839 #ifdef ALTQ 2840 case PF_RULESET_ALTQ: 
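				/*
				 * altq transactions are global; an element
				 * naming an anchor is rejected below.
				 */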
2841 if (ioe->anchor[0]) { 2842 free(table, M_TEMP); 2843 free(ioe, M_TEMP); 2844 error = EINVAL; 2845 goto fail; 2846 } 2847 if ((error = pf_begin_altq(&ioe->ticket))) { 2848 free(table, M_TEMP); 2849 free(ioe, M_TEMP); 2850 goto fail; 2851 } 2852 break; 2853 #endif /* ALTQ */ 2854 case PF_RULESET_TABLE: 2855 bzero(table, sizeof(*table)); 2856 strlcpy(table->pfrt_anchor, ioe->anchor, 2857 sizeof(table->pfrt_anchor)); 2858 if ((error = pfr_ina_begin(table, 2859 &ioe->ticket, NULL, 0))) { 2860 free(table, M_TEMP); 2861 free(ioe, M_TEMP); 2862 goto fail; 2863 } 2864 break; 2865 default: 2866 if ((error = pf_begin_rules(&ioe->ticket, 2867 ioe->rs_num, ioe->anchor))) { 2868 free(table, M_TEMP); 2869 free(ioe, M_TEMP); 2870 goto fail; 2871 } 2872 break; 2873 } 2874 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 2875 free(table, M_TEMP); 2876 free(ioe, M_TEMP); 2877 error = EFAULT; 2878 goto fail; 2879 } 2880 } 2881 free(table, M_TEMP); 2882 free(ioe, M_TEMP); 2883 break; 2884 } 2885 2886 case DIOCXROLLBACK: { 2887 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2888 struct pfioc_trans_e *ioe; 2889 struct pfr_table *table; 2890 int i; 2891 2892 if (io->esize != sizeof(*ioe)) { 2893 error = ENODEV; 2894 goto fail; 2895 } 2896 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 2897 M_TEMP, M_WAITOK); 2898 table = (struct pfr_table *)malloc(sizeof(*table), 2899 M_TEMP, M_WAITOK); 2900 for (i = 0; i < io->size; i++) { 2901 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2902 free(table, M_TEMP); 2903 free(ioe, M_TEMP); 2904 error = EFAULT; 2905 goto fail; 2906 } 2907 switch (ioe->rs_num) { 2908 #ifdef ALTQ 2909 case PF_RULESET_ALTQ: 2910 if (ioe->anchor[0]) { 2911 free(table, M_TEMP); 2912 free(ioe, M_TEMP); 2913 error = EINVAL; 2914 goto fail; 2915 } 2916 if ((error = pf_rollback_altq(ioe->ticket))) { 2917 free(table, M_TEMP); 2918 free(ioe, M_TEMP); 2919 goto fail; /* really bad */ 2920 } 2921 break; 2922 #endif /* ALTQ */ 2923 case PF_RULESET_TABLE: 2924 bzero(table, sizeof(*table)); 2925 strlcpy(table->pfrt_anchor, ioe->anchor, 2926 sizeof(table->pfrt_anchor)); 2927 if ((error = pfr_ina_rollback(table, 2928 ioe->ticket, NULL, 0))) { 2929 free(table, M_TEMP); 2930 free(ioe, M_TEMP); 2931 goto fail; /* really bad */ 2932 } 2933 break; 2934 default: 2935 if ((error = pf_rollback_rules(ioe->ticket, 2936 ioe->rs_num, ioe->anchor))) { 2937 free(table, M_TEMP); 2938 free(ioe, M_TEMP); 2939 goto fail; /* really bad */ 2940 } 2941 break; 2942 } 2943 } 2944 free(table, M_TEMP); 2945 free(ioe, M_TEMP); 2946 break; 2947 } 2948 2949 case DIOCXCOMMIT: { 2950 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2951 struct pfioc_trans_e *ioe; 2952 struct pfr_table *table; 2953 struct pf_ruleset *rs; 2954 int i; 2955 2956 if (io->esize != sizeof(*ioe)) { 2957 error = ENODEV; 2958 goto fail; 2959 } 2960 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe), 2961 M_TEMP, M_WAITOK); 2962 table = (struct pfr_table *)malloc(sizeof(*table), 2963 M_TEMP, M_WAITOK); 2964 /* first makes sure everything will succeed */ 2965 for (i = 0; i < io->size; i++) { 2966 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2967 free(table, M_TEMP); 2968 free(ioe, M_TEMP); 2969 error = EFAULT; 2970 goto fail; 2971 } 2972 switch (ioe->rs_num) { 2973 #ifdef ALTQ 2974 case PF_RULESET_ALTQ: 2975 if (ioe->anchor[0]) { 2976 free(table, M_TEMP); 2977 free(ioe, M_TEMP); 2978 error = EINVAL; 2979 goto fail; 2980 } 2981 if (!altqs_inactive_open || ioe->ticket != 2982 ticket_altqs_inactive) { 2983 free(table, M_TEMP); 2984 free(ioe, M_TEMP); 
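				/*
				 * the inactive altq set was closed or
				 * reopened under us: refuse to commit
				 * with a stale ticket.
				 */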
2985 error = EBUSY; 2986 goto fail; 2987 } 2988 break; 2989 #endif /* ALTQ */ 2990 case PF_RULESET_TABLE: 2991 rs = pf_find_ruleset(ioe->anchor); 2992 if (rs == NULL || !rs->topen || ioe->ticket != 2993 rs->tticket) { 2994 free(table, M_TEMP); 2995 free(ioe, M_TEMP); 2996 error = EBUSY; 2997 goto fail; 2998 } 2999 break; 3000 default: 3001 if (ioe->rs_num < 0 || ioe->rs_num >= 3002 PF_RULESET_MAX) { 3003 free(table, M_TEMP); 3004 free(ioe, M_TEMP); 3005 error = EINVAL; 3006 goto fail; 3007 } 3008 rs = pf_find_ruleset(ioe->anchor); 3009 if (rs == NULL || 3010 !rs->rules[ioe->rs_num].inactive.open || 3011 rs->rules[ioe->rs_num].inactive.ticket != 3012 ioe->ticket) { 3013 free(table, M_TEMP); 3014 free(ioe, M_TEMP); 3015 error = EBUSY; 3016 goto fail; 3017 } 3018 break; 3019 } 3020 } 3021 /* now do the commit - no errors should happen here */ 3022 for (i = 0; i < io->size; i++) { 3023 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 3024 free(table, M_TEMP); 3025 free(ioe, M_TEMP); 3026 error = EFAULT; 3027 goto fail; 3028 } 3029 switch (ioe->rs_num) { 3030 #ifdef ALTQ 3031 case PF_RULESET_ALTQ: 3032 if ((error = pf_commit_altq(ioe->ticket))) { 3033 free(table, M_TEMP); 3034 free(ioe, M_TEMP); 3035 goto fail; /* really bad */ 3036 } 3037 break; 3038 #endif /* ALTQ */ 3039 case PF_RULESET_TABLE: 3040 bzero(table, sizeof(*table)); 3041 strlcpy(table->pfrt_anchor, ioe->anchor, 3042 sizeof(table->pfrt_anchor)); 3043 if ((error = pfr_ina_commit(table, ioe->ticket, 3044 NULL, NULL, 0))) { 3045 free(table, M_TEMP); 3046 free(ioe, M_TEMP); 3047 goto fail; /* really bad */ 3048 } 3049 break; 3050 default: 3051 if ((error = pf_commit_rules(ioe->ticket, 3052 ioe->rs_num, ioe->anchor))) { 3053 free(table, M_TEMP); 3054 free(ioe, M_TEMP); 3055 goto fail; /* really bad */ 3056 } 3057 break; 3058 } 3059 } 3060 free(table, M_TEMP); 3061 free(ioe, M_TEMP); 3062 break; 3063 } 3064 3065 case DIOCGETSRCNODES: { 3066 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3067 struct pf_src_node *n, *p, *pstore; 3068 u_int32_t nr = 0; 3069 int space = psn->psn_len; 3070 3071 if (space == 0) { 3072 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 3073 nr++; 3074 psn->psn_len = sizeof(struct pf_src_node) * nr; 3075 break; 3076 } 3077 3078 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 3079 3080 p = psn->psn_src_nodes; 3081 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3082 int secs = time_second, diff; 3083 3084 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3085 break; 3086 3087 bcopy(n, pstore, sizeof(*pstore)); 3088 if (n->rule.ptr != NULL) 3089 pstore->rule.nr = n->rule.ptr->nr; 3090 pstore->creation = secs - pstore->creation; 3091 if (pstore->expire > secs) 3092 pstore->expire -= secs; 3093 else 3094 pstore->expire = 0; 3095 3096 /* adjust the connection rate estimate */ 3097 diff = secs - n->conn_rate.last; 3098 if (diff >= n->conn_rate.seconds) 3099 pstore->conn_rate.count = 0; 3100 else 3101 pstore->conn_rate.count -= 3102 n->conn_rate.count * diff / 3103 n->conn_rate.seconds; 3104 3105 error = copyout(pstore, p, sizeof(*p)); 3106 if (error) { 3107 free(pstore, M_TEMP); 3108 goto fail; 3109 } 3110 p++; 3111 nr++; 3112 } 3113 psn->psn_len = sizeof(struct pf_src_node) * nr; 3114 3115 free(pstore, M_TEMP); 3116 break; 3117 } 3118 3119 case DIOCCLRSRCNODES: { 3120 struct pf_src_node *n; 3121 struct pf_state *state; 3122 3123 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3124 state->src_node = NULL; 3125 state->nat_src_node = NULL; 3126 } 3127 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3128 
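			/* force immediate expiry; the purge below frees the nodes */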
n->expire = 1; 3129 n->states = 0; 3130 } 3131 pf_purge_expired_src_nodes(1); 3132 pf_status.src_nodes = 0; 3133 break; 3134 } 3135 3136 case DIOCKILLSRCNODES: { 3137 struct pf_src_node *sn; 3138 struct pf_state *ps; 3139 struct pfioc_src_node_kill *psnk = \ 3140 (struct pfioc_src_node_kill *) addr; 3141 int killed = 0; 3142 3143 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 3144 if (PF_MATCHA(psnk->psnk_src.neg, \ 3145 &psnk->psnk_src.addr.v.a.addr, \ 3146 &psnk->psnk_src.addr.v.a.mask, \ 3147 &sn->addr, sn->af) && 3148 PF_MATCHA(psnk->psnk_dst.neg, \ 3149 &psnk->psnk_dst.addr.v.a.addr, \ 3150 &psnk->psnk_dst.addr.v.a.mask, \ 3151 &sn->raddr, sn->af)) { 3152 /* Handle state to src_node linkage */ 3153 if (sn->states != 0) { 3154 RB_FOREACH(ps, pf_state_tree_id, 3155 &tree_id) { 3156 if (ps->src_node == sn) 3157 ps->src_node = NULL; 3158 if (ps->nat_src_node == sn) 3159 ps->nat_src_node = NULL; 3160 } 3161 sn->states = 0; 3162 } 3163 sn->expire = 1; 3164 killed++; 3165 } 3166 } 3167 3168 if (killed > 0) 3169 pf_purge_expired_src_nodes(1); 3170 3171 psnk->psnk_af = killed; 3172 break; 3173 } 3174 3175 case DIOCSETHOSTID: { 3176 u_int32_t *hid = (u_int32_t *)addr; 3177 3178 if (*hid == 0) 3179 pf_status.hostid = cprng_fast32(); 3180 else 3181 pf_status.hostid = *hid; 3182 break; 3183 } 3184 3185 case DIOCOSFPFLUSH: 3186 pf_osfp_flush(); 3187 break; 3188 3189 case DIOCIGETIFACES: { 3190 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3191 3192 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 3193 error = ENODEV; 3194 break; 3195 } 3196 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 3197 &io->pfiio_size); 3198 break; 3199 } 3200 3201 case DIOCSETIFFLAG: { 3202 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3203 3204 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3205 break; 3206 } 3207 3208 case DIOCCLRIFFLAG: { 3209 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3210 3211 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3212 break; 3213 } 3214 3215 case DIOCSETLCK: { 3216 pf_state_lock = *(uint32_t*)addr; 3217 break; 3218 } 3219 3220 default: 3221 error = ENODEV; 3222 break; 3223 } 3224 fail: 3225 splx(s); 3226 if (flags & FWRITE) 3227 rw_exit_write(&pf_consistency_lock); 3228 else 3229 rw_exit_read(&pf_consistency_lock); 3230 return (error); 3231 } 3232 3233 #ifdef __NetBSD__ 3234 #ifdef INET 3235 static int 3236 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir) 3237 { 3238 int error; 3239 3240 /* 3241 * ensure that mbufs are writable beforehand 3242 * as it's assumed by pf code. 3243 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough. 3244 * XXX inefficient 3245 */ 3246 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT); 3247 if (error) { 3248 m_freem(*mp); 3249 *mp = NULL; 3250 return error; 3251 } 3252 3253 /* 3254 * If the packet is out-bound, we can't delay checksums 3255 * here. For in-bound, the checksum has already been 3256 * validated. 3257 */ 3258 if (dir == PFIL_OUT) { 3259 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) { 3260 in_delayed_cksum(*mp); 3261 (*mp)->m_pkthdr.csum_flags &= 3262 ~(M_CSUM_TCPv4|M_CSUM_UDPv4); 3263 } 3264 } 3265 3266 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL) 3267 != PF_PASS) { 3268 m_freem(*mp); 3269 *mp = NULL; 3270 return EHOSTUNREACH; 3271 } 3272 3273 /* 3274 * we're not compatible with fast-forward. 
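	 * clearing M_CANFASTFWD below keeps forwarded packets on the
	 * slow path, so pf also sees them on the way out.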
3275 */ 3276 3277 if (dir == PFIL_IN && *mp) { 3278 (*mp)->m_flags &= ~M_CANFASTFWD; 3279 } 3280 3281 return (0); 3282 } 3283 #endif /* INET */ 3284 3285 #ifdef INET6 3286 static int 3287 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir) 3288 { 3289 int error; 3290 3291 /* 3292 * ensure that mbufs are writable beforehand 3293 * as it's assumed by pf code. 3294 * XXX inefficient 3295 */ 3296 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT); 3297 if (error) { 3298 m_freem(*mp); 3299 *mp = NULL; 3300 return error; 3301 } 3302 3303 /* 3304 * If the packet is out-bound, we can't delay checksums 3305 * here. For in-bound, the checksum has already been 3306 * validated. 3307 */ 3308 if (dir == PFIL_OUT) { 3309 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) { 3310 in6_delayed_cksum(*mp); 3311 (*mp)->m_pkthdr.csum_flags &= 3312 ~(M_CSUM_TCPv6|M_CSUM_UDPv6); 3313 } 3314 } 3315 3316 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL) 3317 != PF_PASS) { 3318 m_freem(*mp); 3319 *mp = NULL; 3320 return EHOSTUNREACH; 3321 } else 3322 return (0); 3323 } 3324 #endif /* INET6 */ 3325 3326 static int 3327 pf_pfil_attach(void) 3328 { 3329 pfil_head_t *ph_inet; 3330 #ifdef INET6 3331 pfil_head_t *ph_inet6; 3332 #endif /* INET6 */ 3333 int error; 3334 3335 if (pf_pfil_attached) 3336 return (EBUSY); 3337 3338 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET); 3339 if (ph_inet) 3340 error = pfil_add_hook((void *)pfil4_wrapper, NULL, 3341 PFIL_IN|PFIL_OUT, ph_inet); 3342 else 3343 error = ENOENT; 3344 if (error) 3345 return (error); 3346 3347 #ifdef INET6 3348 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6); 3349 if (ph_inet6) 3350 error = pfil_add_hook((void *)pfil6_wrapper, NULL, 3351 PFIL_IN|PFIL_OUT, ph_inet6); 3352 else 3353 error = ENOENT; 3354 if (error) 3355 goto bad; 3356 #endif /* INET6 */ 3357 3358 pf_pfil_attached = 1; 3359 3360 return (0); 3361 3362 #ifdef INET6 3363 bad: 3364 pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet); 3365 #endif /* INET6 */ 3366 3367 return (error); 3368 } 3369 3370 static int 3371 pf_pfil_detach(void) 3372 { 3373 pfil_head_t *ph_inet; 3374 #ifdef INET6 3375 pfil_head_t *ph_inet6; 3376 #endif /* INET6 */ 3377 3378 if (pf_pfil_attached == 0) 3379 return (EBUSY); 3380 3381 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET); 3382 if (ph_inet) 3383 pfil_remove_hook((void *)pfil4_wrapper, NULL, 3384 PFIL_IN|PFIL_OUT, ph_inet); 3385 #ifdef INET6 3386 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6); 3387 if (ph_inet6) 3388 pfil_remove_hook((void *)pfil6_wrapper, NULL, 3389 PFIL_IN|PFIL_OUT, ph_inet6); 3390 #endif /* INET6 */ 3391 pf_pfil_attached = 0; 3392 3393 return (0); 3394 } 3395 #endif /* __NetBSD__ */ 3396 3397 #if defined(__NetBSD__) 3398 MODULE(MODULE_CLASS_DRIVER, pf, "bpf"); 3399 3400 static int 3401 pf_modcmd(modcmd_t cmd, void *opaque) 3402 { 3403 #ifdef _MODULE 3404 extern void pflogattach(int); 3405 extern void pflogdetach(void); 3406 3407 devmajor_t cmajor = NODEVMAJOR, bmajor = NODEVMAJOR; 3408 int err; 3409 3410 switch (cmd) { 3411 case MODULE_CMD_INIT: 3412 err = devsw_attach("pf", NULL, &bmajor, &pf_cdevsw, &cmajor); 3413 if (err) 3414 return err; 3415 pfattach(1); 3416 pflogattach(1); 3417 return 0; 3418 case MODULE_CMD_FINI: 3419 if (pf_status.running) { 3420 return EBUSY; 3421 } else { 3422 pfdetach(); 3423 pflogdetach(); 3424 return devsw_detach(NULL, &pf_cdevsw); 3425 } 3426 default: 3427 return ENOTTY; 3428 } 3429 #else 3430 if (cmd == MODULE_CMD_INIT) 3431 return 
0; 3432 return ENOTTY; 3433 #endif 3434 } 3435 #endif 3436