/*	$NetBSD: pf_ioctl.c,v 1.37 2009/10/03 00:37:02 elad Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.37 2009/10/03 00:37:02 elad Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_pfil_hooks.h"
#endif

#include "pfsync.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/kauth.h>
#endif /* __NetBSD__ */

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifndef __NetBSD__
#include <dev/rndvar.h>
#include <crypto/md5.h>
#else
#include <sys/md5.h>
#endif /* __NetBSD__ */
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif /* NPFLOG > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

void			 pfattach(int);
#ifndef __NetBSD__
void			 pf_thread_create(void *);
#endif /* !__NetBSD__ */
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, int, char *);
void			 pf_state_export(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);
void			 pf_state_import(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);

struct pf_rule		 pf_default_rule;
#ifdef __NetBSD__
krwlock_t		 pf_consistency_lock;
#else
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#endif /* __NetBSD__ */
#ifdef ALTQ
static int		 pf_altq_running;
#endif

#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};

static int pfil4_wrapper(void *, struct mbuf **, struct ifnet *, int);
#ifdef INET6
static int pfil6_wrapper(void *, struct mbuf **, struct ifnet *, int);
#endif /* INET6 */

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached;

static kauth_listener_t pf_listener;
#endif /* __NetBSD__ */

#ifdef __NetBSD__
static int
pf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)arg0;

	if (action != KAUTH_NETWORK_FIREWALL)
		return result;

	/* These must have come from device context. */
	if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
	    (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
		result = KAUTH_RESULT_ALLOW;

	return result;
}
#endif /* __NetBSD__ */
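
/*
 * Attach-time initialization: create the memory pools, bring up the
 * table, interface and OS-fingerprint subsystems, install the default
 * rule and its timeouts, and start the state-purge kernel thread.
 */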
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

#ifdef __NetBSD__
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL, IPL_SOFTNET);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL, IPL_SOFTNET);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL, IPL_SOFTNET);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */

	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

#ifdef __NetBSD__
	rw_init(&pf_consistency_lock);
#endif /* __NetBSD__ */

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
#ifdef __NetBSD__
	if (kthread_create(PRI_NONE, 0, NULL, pf_purge_thread, NULL, NULL,
	    "pfpurge"))
		panic("pfpurge thread");
#else
	kthread_create_deferred(pf_thread_create, NULL);
#endif /* !__NetBSD__ */

#ifdef __NetBSD__
	pf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    pf_listener_cb, NULL);
#endif /* __NetBSD__ */
}

#ifndef __NetBSD__
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
#endif /* !__NetBSD__ */

int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}
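
/*
 * Look up the address pool of a rule.  A rule lives on either the
 * active or the inactive (being loaded) list of its ruleset; with
 * check_ticket set, the caller's ticket must match that list's ticket,
 * which guards against userland acting on a ruleset that was swapped
 * out underneath it.
 */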
struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
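
/*
 * Tag names are mapped to small integer IDs and reference counted,
 * so rules sharing a tag name share one ID.  The same machinery backs
 * the ALTQ queue IDs (pf_qids) further down.
 */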
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif /* !__NetBSD__ */
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif /* !__NetBSD__ */
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif /* !__NetBSD__ */
}
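
/*
 * ALTQ lists use the same ticketed two-phase scheme as rulesets:
 * pf_begin_altq() purges and opens the inactive list,
 * pf_rollback_altq() throws it away again, and pf_commit_altq()
 * swaps the inactive and active lists at splsoftnet and attaches
 * the new disciplines.
 */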
#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}

int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}

int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * if the discipline is no longer referenced, it has been
	 * overridden by a new one; in that case, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */

int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
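
/*
 * The PF_MD5_* helpers below fold every rule into one digest over the
 * whole main ruleset (see pf_setup_pfsync_matching()), stored in
 * pf_status.pf_chksum so that pfsync peers can verify they are running
 * identical rulesets.  Multi-byte fields are hashed in network byte
 * order to keep the digest host-independent.
 */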
#define PF_MD5_UPD(st, elm) \
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
	(stor) = htonl((st)->elm); \
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
	(stor) = htons((st)->elm); \
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t)); \
} while (0)

void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
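
/*
 * Conversion between the kernel-internal state representation and the
 * flat struct pfsync_state used by userland and pfsync.  Times are
 * exported as deltas (seconds since creation, seconds until expiry)
 * rather than absolute values, so they stay meaningful on another host.
 */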
void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	int secs = time_second;

	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;
}

void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = time_second;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}

int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(void *) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}
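
/*
 * Main ioctl dispatcher.  Two access checks run before the command
 * switch: a kauth(9) authorization (on NetBSD) restricting
 * unprivileged callers to a read-mostly subset of commands, and an
 * FWRITE check rejecting modifying commands on descriptors opened
 * read-only.  Table commands carrying PFR_FLAG_DUMMY pass either
 * check, since they only simulate the operation.
 */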
int
pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		rw_enter_write(&pf_consistency_lock);
	else
		rw_enter_read(&pf_consistency_lock);

	s = splsoftnet();
	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __NetBSD__
			error = pf_pfil_attach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 1;
			pf_status.since = time_second;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time_second;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
#ifdef __NetBSD__
			error = pf_pfil_detach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 0;
			pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;
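
	/*
	 * DIOCADDRULE appends a single rule to the inactive list opened
	 * by an earlier pf_begin_rules() ticket; the batch only takes
	 * effect when the lists are later committed and swapped.
	 */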
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
#ifdef __NetBSD__
		rule->cuid = kauth_cred_getuid(l->l_cred);
		rule->cpid = l->l_proc->p_pid;
#else
		rule->cuid = p->p_cred->p_ruid;
		rule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

#ifndef __NetBSD__
		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
			error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
#if NPFLOG > 0
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
#endif
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}
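
	/*
	 * DIOCCHANGERULE edits the active ruleset in place: the
	 * replacement rule is allocated and fully set up first, and only
	 * then is the old rule unlinked, the queue renumbered and the
	 * skip steps recalculated.
	 */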
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
#ifdef __NetBSD__
			newrule->cuid = kauth_cred_getuid(l->l_cred);
			newrule->cpid = l->l_proc->p_pid;
#else
			newrule->cuid = p->p_cred->p_ruid;
			newrule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

#ifndef __NetBSD__
			if (newrule->rtableid > 0 &&
			    !rtable_exists(newrule->rtableid))
				error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if NPFLOG > 0
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}
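
	/*
	 * The state-killing loops below take RB_NEXT() before unlinking
	 * the current state, so removing it does not invalidate the
	 * traversal.
	 */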
	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_key	*sk;
		struct pf_state_host	*src, *dst;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    src->port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dst->port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
#if NPFSYNC > 0
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = (struct pfsync_state *)ps->state;
		struct pf_state		*s;
		struct pf_state_key	*sk;
		struct pfi_kif		*kif;

		if (sp->timeout >= PFTM_MAX &&
		    sp->timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_NOWAIT);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s)) == NULL) {
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOMEM;
		}
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		u_int32_t		 nr;

		nr = 0;
		RB_FOREACH(s, pf_state_tree_id, &tree_id) {
			if (nr >= ps->nr)
				break;
			nr++;
		}
		if (s == NULL) {
			error = EBUSY;
			break;
		}

		pf_state_export((struct pfsync_state *)&ps->state,
		    s->state_key, s);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP);
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP);
		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;

		bcopy(&pf_status, s, sizeof(struct pf_status));
		pfi_fill_oldstatus(s);
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		if (ifunit(pi->ifname) == NULL) {
			error = EINVAL;
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = time_second;
		if (*pf_status.ifname)
			pfi_clr_istats(pf_status.ifname);
		break;
	}

	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
				key.ext.port = pnl->dport;
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				key.gwy.port = pnl->sport;
				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				key.lan.port = pnl->dport;
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
				key.ext.port = pnl->sport;
				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af);
					pnl->rsport = sk->lan.port;
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					pnl->rdport = pnl->dport;
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af);
					pnl->rdport = sk->gwy.port;
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					pnl->rsport = pnl->sport;
				}
			} else
				error = ENOENT;
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		old = pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
#ifdef __NetBSD__
		pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0);
#else
		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0) != 0) {
			error = EBUSY;
			goto fail;
		}
#endif /* !__NetBSD__ */
		old_limit = pf_pool_limits[pl->index].limit;
		pf_pool_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		pf_status.debug = *level;
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;
		int old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		old = pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;
		int old_limit;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
#ifdef __NetBSD__
		pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0);
#else
		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0) != 0) {
			error = EBUSY;
			goto fail;
		}
#endif /* !__NetBSD__ */
		old_limit = pf_pool_limits[pl->index].limit;
		pf_pool_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		break;
	}
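
	/*
	 * Illustrative userland sketch (an assumption, not part of the
	 * original source): raising the hard limit on state entries,
	 * the equivalent of "set limit states N" in pf.conf.  The
	 * handler above hands the previous limit back in pl.limit.
	 *
	 *	struct pfioc_limit pl;
	 *
	 *	memset(&pl, 0, sizeof(pl));
	 *	pl.index = PF_LIMIT_STATES;
	 *	pl.limit = 20000;
	 *	if (ioctl(dev, DIOCSETLIMIT, &pl) == -1)
	 *		err(1, "DIOCSETLIMIT");
	 */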

	case DIOCSETDEBUG: {
		u_int32_t *level = (u_int32_t *)addr;

		pf_status.debug = *level;
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset *ruleset = &pf_main_ruleset;
		struct pf_rule *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq *altq;

		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 1;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq *altq;

		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 0;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQ: {
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq, *a;

		if (pa->ticket != ticket_altqs_inactive) {
			error = EBUSY;
			break;
		}
		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
		if (altq == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, altq);
				break;
			}
			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		error = altq_add(altq);
		if (error) {
			pool_put(&pf_altq_pl, altq);
			break;
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCGETALTQS: {
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq;

		pa->nr = 0;
		TAILQ_FOREACH(altq, pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = ticket_altqs_active;
		break;
	}

	case DIOCGETALTQ: {
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq;
		u_int32_t nr;

		if (pa->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCCHANGEALTQ:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATS: {
		struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
		struct pf_altq *altq;
		u_int32_t nr;
		int nbytes;

		if (pq->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		error = altq_getqstats(altq, pq->buf, &nbytes);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */
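
	/*
	 * Illustrative userland sketch (an assumption, not part of the
	 * original source): queues are enumerated by first asking
	 * DIOCGETALTQS for the count and a ticket, then fetching one
	 * entry per DIOCGETALTQ call by index.  If a new configuration
	 * is committed in between, the stale ticket makes the fetch
	 * fail with EBUSY instead of returning inconsistent data.
	 *
	 *	struct pfioc_altq pa;
	 *	u_int32_t i, n, ticket;
	 *
	 *	memset(&pa, 0, sizeof(pa));
	 *	if (ioctl(dev, DIOCGETALTQS, &pa) == -1)
	 *		err(1, "DIOCGETALTQS");
	 *	n = pa.nr;
	 *	ticket = pa.ticket;
	 *	for (i = 0; i < n; i++) {
	 *		pa.ticket = ticket;
	 *		pa.nr = i;
	 *		if (ioctl(dev, DIOCGETALTQ, &pa) == -1)
	 *			err(1, "DIOCGETALTQ");
	 *		(one queue description is now in pa.altq)
	 *	}
	 */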

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pp->nr = 0;
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		u_int32_t nr = 0;

		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
#ifndef INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}
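
	/*
	 * Illustrative userland sketch (an assumption, not part of the
	 * original source): pool addresses are enumerated much like
	 * queues.  DIOCGETADDRS reports the count for the pool attached
	 * to a given rule, then DIOCGETADDR fetches one entry per call
	 * by index; the ticket check applies to the indexed fetch.
	 *
	 *	struct pfioc_pooladdr pp;
	 *	u_int32_t i, n;
	 *
	 *	memset(&pp, 0, sizeof(pp));
	 *	pp.r_action = PF_NAT;	(pool of a NAT rule, for example)
	 *	pp.r_num = rule_number;
	 *	pp.ticket = ticket;	(from a prior DIOCGETRULES)
	 *	if (ioctl(dev, DIOCGETADDRS, &pp) == -1)
	 *		err(1, "DIOCGETADDRS");
	 *	for (i = 0, n = pp.nr; i < n; i++) {
	 *		pp.nr = i;
	 *		if (ioctl(dev, DIOCGETADDR, &pp) == -1)
	 *			err(1, "DIOCGETADDR");
	 *		(one pool entry is now in pp.addr)
	 *	}
	 */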

	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
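
	/*
	 * Illustrative userland sketch (an assumption, not part of the
	 * original source): child anchors of a given path are listed by
	 * first counting them with DIOCGETRULESETS and then fetching
	 * each name by index.  An empty path names the main ruleset;
	 * "ftp-proxy" below is a hypothetical anchor name.
	 *
	 *	struct pfioc_ruleset pr;
	 *	u_int32_t i, n;
	 *
	 *	memset(&pr, 0, sizeof(pr));
	 *	strlcpy(pr.path, "ftp-proxy", sizeof(pr.path));
	 *	if (ioctl(dev, DIOCGETRULESETS, &pr) == -1)
	 *		err(1, "DIOCGETRULESETS");
	 *	for (i = 0, n = pr.nr; i < n; i++) {
	 *		pr.nr = i;
	 *		if (ioctl(dev, DIOCGETRULESET, &pr) == -1)
	 *			err(1, "DIOCGETRULESET");
	 *		printf("%s/%s\n", pr.path, pr.name);
	 *	}
	 */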

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
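
	/*
	 * Illustrative userland sketch (an assumption, not part of the
	 * original source): every DIOCR* table call carries the element
	 * size in pfrio_esize, which the handlers above use to reject a
	 * mismatched ABI with ENODEV.  Adding one host address to a
	 * hypothetical table "spammers" might look like this:
	 *
	 *	struct pfioc_table io;
	 *	struct pfr_addr pfra;
	 *
	 *	memset(&io, 0, sizeof(io));
	 *	memset(&pfra, 0, sizeof(pfra));
	 *	strlcpy(io.pfrio_table.pfrt_name, "spammers",
	 *	    sizeof(io.pfrio_table.pfrt_name));
	 *	pfra.pfra_af = AF_INET;
	 *	pfra.pfra_net = 32;
	 *	inet_pton(AF_INET, "192.0.2.1", &pfra.pfra_ip4addr);
	 *	io.pfrio_buffer = &pfra;
	 *	io.pfrio_size = 1;
	 *	io.pfrio_esize = sizeof(pfra);
	 *	if (ioctl(dev, DIOCRADDADDRS, &io) == -1)
	 *		err(1, "DIOCRADDADDRS");
	 */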

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}

	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}
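
	/*
	 * Illustrative userland sketch (an assumption, not part of the
	 * original source): DIOCXBEGIN and DIOCXCOMMIT bracket the
	 * transaction protocol used to replace whole rulesets
	 * atomically; DIOCXROLLBACK is the abort path.  Each array
	 * element names one ruleset to swap, and the tickets handed out
	 * above must be passed back unchanged on commit.
	 *
	 *	struct pfioc_trans trans;
	 *	struct pfioc_trans_e te;
	 *
	 *	memset(&trans, 0, sizeof(trans));
	 *	memset(&te, 0, sizeof(te));
	 *	te.rs_num = PF_RULESET_FILTER;
	 *	trans.size = 1;
	 *	trans.esize = sizeof(te);
	 *	trans.array = &te;
	 *	if (ioctl(dev, DIOCXBEGIN, &trans) == -1)
	 *		err(1, "DIOCXBEGIN");
	 *	(load rules into the inactive set using te.ticket ...)
	 *	if (ioctl(dev, DIOCXCOMMIT, &trans) == -1)
	 *		err(1, "DIOCXCOMMIT");
	 */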

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		/* first make sure everything will succeed */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe->ticket !=
				    ticket_altqs_inactive) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *p, *pstore;
		u_int32_t nr = 0;
		int space = psn->psn_len;

		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int secs = time_second, diff;

			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
				break;

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				free(pstore, M_TEMP);
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		free(pstore, M_TEMP);
		break;
	}
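
	/*
	 * Added note, worked example of the rate adjustment above: the
	 * exported estimate decays linearly over the measurement window.
	 * With conn_rate.count == 12 over conn_rate.seconds == 60 and a
	 * last update diff == 15 seconds ago, the count is reduced by
	 * 12 * 15 / 60 == 3, so userland sees 9; once diff reaches the
	 * full 60-second window, the estimate is reported as 0.
	 */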

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes(1);
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes(1);

		psnk->psnk_af = killed;
		break;
	}
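
	/*
	 * Added note: DIOCKILLSRCNODES reports the number of source
	 * nodes it expired by overloading the psnk_af field of the
	 * request structure; callers must read the count from there
	 * rather than from a dedicated result field.
	 */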

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		if (*hostid == 0)
			pf_status.hostid = arc4random();
		else
			pf_status.hostid = *hostid;
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	splx(s);
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
	return (error);
}

#ifdef __NetBSD__
#ifdef INET
static int
pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * Ensure that mbufs are writable beforehand, as the pf code
	 * assumes it.  An IP header (60 bytes) plus a TCP header
	 * (60 bytes) should be enough.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here.  For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}
	}

	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
	    != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	}

	/*
	 * We're not compatible with fast-forward.
	 */
	if (dir == PFIL_IN && *mp) {
		(*mp)->m_flags &= ~M_CANFASTFWD;
	}

	return (0);
}
#endif /* INET */

#ifdef INET6
static int
pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * Ensure that mbufs are writable beforehand, as the pf code
	 * assumes it.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here.  For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
		}
	}

	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
	    != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	} else
		return (0);
}
#endif /* INET6 */

static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif /* INET6 */
	int error;

	if (pf_pfil_attached)
		return (EBUSY);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		return (error);

#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad;
#endif /* INET6 */

	pf_pfil_attached = 1;

	return (0);

#ifdef INET6
bad:
	pfil_remove_hook((void *)pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT,
	    ph_inet);
#endif /* INET6 */

	return (error);
}

static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif /* INET6 */

	if (pf_pfil_attached == 0)
		return (EBUSY);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif /* INET6 */
	pf_pfil_attached = 0;

	return (0);
}
#endif /* __NetBSD__ */