/*	$NetBSD: pf_ioctl.c,v 1.35 2009/07/28 18:15:26 minskim Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.35 2009/07/28 18:15:26 minskim Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_pfil_hooks.h"
#endif

#ifndef __NetBSD__
#include "pfsync.h"
#else
#define	NPFSYNC	0
#endif /* __NetBSD__ */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/kauth.h>
#endif /* __NetBSD__ */

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifndef __NetBSD__
#include <dev/rndvar.h>
#include <crypto/md5.h>
#else
#include <sys/md5.h>
#endif /* __NetBSD__ */
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif /* NPFLOG > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

void			 pfattach(int);
#ifndef __NetBSD__
void			 pf_thread_create(void *);
#endif /* !__NetBSD__ */
int			 pfopen(dev_t, int, int, struct lwp *);
int			 pfclose(dev_t, int, int, struct lwp *);
struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void			 pf_empty_pool(struct pf_palist *);
int			 pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int			 pf_begin_altq(u_int32_t *);
int			 pf_rollback_altq(u_int32_t);
int			 pf_commit_altq(u_int32_t);
int			 pf_enable_altq(struct pf_altq *);
int			 pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int			 pf_begin_rules(u_int32_t *, int, const char *);
int			 pf_rollback_rules(u_int32_t, int, char *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, int, char *);
void			 pf_state_export(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);
void			 pf_state_import(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);

struct pf_rule		 pf_default_rule;
#ifdef __NetBSD__
krwlock_t		 pf_consistency_lock;
#else
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#endif /* __NetBSD__ */
#ifdef ALTQ
static int		 pf_altq_running;
#endif

#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);

#define	DPFPRINTF(n, x)	if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};

static int pfil4_wrapper(void *, struct mbuf **, struct ifnet *, int);
#ifdef INET6
static int pfil6_wrapper(void *, struct mbuf **, struct ifnet *, int);
#endif /* INET6 */

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached;
#endif /* __NetBSD__ */

void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

#ifdef __NetBSD__
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL, IPL_SOFTNET);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL, IPL_SOFTNET);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL, IPL_SOFTNET);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */

	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

#ifdef __NetBSD__
	rw_init(&pf_consistency_lock);
#endif /* __NetBSD__ */

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
#ifdef __NetBSD__
	if (kthread_create(PRI_NONE, 0, NULL, pf_purge_thread, NULL, NULL,
	    "pfpurge"))
		panic("pfpurge thread");
#else
	kthread_create_deferred(pf_thread_create, NULL);
#endif /* !__NetBSD__ */
}

#ifndef __NetBSD__
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
#endif /* !__NetBSD__ */

int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

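/*
 * Unlink a rule from its queue and release its resources.  The rule
 * itself is only freed once nothing references it any more: while
 * states or source nodes still point at it, destruction is deferred
 * until the last reference is dropped.
 */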
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */
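	/*
	 * Example: with tags 1, 2 and 4 in use, the scan below advances
	 * past 1 and 2, stops at the entry holding 4 (the first gap),
	 * allocates tag 3 and inserts the new entry before it, keeping
	 * the list sorted by tag.
	 */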
	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
#endif /* !__NetBSD__ */
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
#endif /* !__NetBSD__ */
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
#endif /* !__NetBSD__ */
}

#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
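	/*
	 * Open a transaction on the inactive list; the caller has to
	 * present this ticket again to commit or roll back the change.
	 */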
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}

int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}

int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
#endif /* ALTQ */

int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define	PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));	\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));	\
} while (0)

void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}

void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	int secs = time_second;

	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;
}

void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/*
	 * copy to state; s->kif is not set at this point, the interface
	 * is resolved from sp->ifname by the caller (DIOCADDSTATE).
	 */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = time_second;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}

int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(void *) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}

int
pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
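		/* everything else modifies the configuration and needs FWRITE */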
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		rw_enter_write(&pf_consistency_lock);
	else
		rw_enter_read(&pf_consistency_lock);

	s = splsoftnet();
	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __NetBSD__
			error = pf_pfil_attach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 1;
			pf_status.since = time_second;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time_second;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
#ifdef __NetBSD__
			error = pf_pfil_detach();
			if (error)
				break;
#endif /* __NetBSD__ */
			pf_status.running = 0;
			pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;

	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
#ifdef __NetBSD__
		rule->cuid = kauth_cred_getuid(l->l_cred);
		rule->cpid = l->l_proc->p_pid;
#else
		rule->cuid = p->p_cred->p_ruid;
		rule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

#ifndef __NetBSD__
		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
			error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
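		/*
		 * Resolve symbolic tag names to numeric tag ids; a return
		 * of zero means the tag could not be allocated.
		 */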
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
#if NPFLOG > 0
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
#endif
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
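		/*
		 * Translate the kernel-internal address representations
		 * (dynamic interfaces, tables, route labels) back into
		 * their user-visible form before the rule is copied out.
		 */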
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
#ifdef __NetBSD__
			newrule->cuid = kauth_cred_getuid(l->l_cred);
			newrule->cpid = l->l_proc->p_pid;
#else
			newrule->cuid = p->p_cred->p_ruid;
			newrule->cpid = p->p_pid;
#endif /* !__NetBSD__ */
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

#ifndef __NetBSD__
			if (newrule->rtableid > 0 &&
			    !rtable_exists(newrule->rtableid))
				error = EBUSY;
#endif /* !__NetBSD__ */

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if NPFLOG > 0
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}

	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_key	*sk;
		struct pf_state_host	*src, *dst;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    src->port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dst->port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
#if NPFSYNC > 0
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = (struct pfsync_state *)ps->state;
		struct pf_state		*s;
		struct pf_state_key	*sk;
		struct pfi_kif		*kif;

		if (sp->timeout >= PFTM_MAX &&
		    sp->timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_NOWAIT);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOMEM;
		}
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		u_int32_t		 nr;

		nr = 0;
		RB_FOREACH(s, pf_state_tree_id, &tree_id) {
			if (nr >= ps->nr)
				break;
			nr++;
		}
		if (s == NULL) {
			error = EBUSY;
			break;
		}

		pf_state_export((struct pfsync_state *)&ps->state,
		    s->state_key, s);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}
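		/*
		 * Export the states one at a time through a temporary
		 * buffer, stopping once the caller's buffer is full.
		 */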
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP);
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP);
		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;

		bcopy(&pf_status, s, sizeof(struct pf_status));
		pfi_fill_oldstatus(s);
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		if (ifunit(pi->ifname) == NULL) {
			error = EINVAL;
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = time_second;
		if (*pf_status.ifname)
			pfi_clr_istats(pf_status.ifname);
		break;
	}

	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
				key.ext.port = pnl->dport;
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				key.gwy.port = pnl->sport;
				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				key.lan.port = pnl->dport;
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
				key.ext.port = pnl->sport;
				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af);
					pnl->rsport = sk->lan.port;
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					pnl->rdport = pnl->dport;
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af);
					pnl->rdport = sk->gwy.port;
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					pnl->rsport = pnl->sport;
				}
			} else
				error = ENOENT;
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		old = pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
#ifdef __NetBSD__
		pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0);
#else
		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0) != 0) {
			error = EBUSY;
			goto fail;
		}
#endif /* !__NetBSD__ */
		old_limit = pf_pool_limits[pl->index].limit;
		pf_pool_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		pf_status.debug = *level;
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

#ifdef ALTQ
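	/*
	 * In the altq lists, entries with an empty qname stand for the
	 * discipline attached to an interface itself; only those are
	 * enabled and disabled here, named queue entries are left alone.
	 */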
	case DIOCSTARTALTQ: {
		struct pf_altq	*altq;

		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 1;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq	*altq;

		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 0;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq, *a;

		if (pa->ticket != ticket_altqs_inactive) {
			error = EBUSY;
			break;
		}
		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
		if (altq == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, altq);
				break;
			}
			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		error = altq_add(altq);
		if (error) {
			pool_put(&pf_altq_pl, altq);
			break;
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCGETALTQS: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;

		pa->nr = 0;
		TAILQ_FOREACH(altq, pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = ticket_altqs_active;
		break;
	}

	case DIOCGETALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;

		if (pa->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCCHANGEALTQ:
		/* CHANGEALTQ not supported yet! */

	case DIOCGETQSTATS: {
		struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
		struct pf_altq *altq;
		u_int32_t nr;
		int nbytes;

		if (pq->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		error = altq_getqstats(altq, pq->buf, &nbytes);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pp->nr = 0;
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		u_int32_t nr = 0;

		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}
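
	/*
	 * Illustrative userland sketch (not part of this file): pool
	 * addresses are staged in pf_pabuf under a ticket before they
	 * are attached to a rule.  A loader such as pfctl(8) would
	 * typically do, with "dev" open on /dev/pf:
	 *
	 *	struct pfioc_pooladdr pp;
	 *
	 *	memset(&pp, 0, sizeof(pp));
	 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)
	 *		err(1, "DIOCBEGINADDRS");
	 *
	 * pp.ticket is now set; the caller fills in pp.addr and calls
	 * DIOCADDADDR once per pool address with the same ticket.  The
	 * buffered list is then consumed when the rule referencing the
	 * pool is added.
	 */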

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
#ifndef INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}
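
	/*
	 * Illustrative userland sketch (not part of this file):
	 * enumerating the anchors under a path pairs DIOCGETRULESETS
	 * (count) with DIOCGETRULESET (fetch by index).  "some/anchor"
	 * below is a hypothetical anchor path:
	 *
	 *	struct pfioc_ruleset pr;
	 *	u_int32_t i, n;
	 *
	 *	memset(&pr, 0, sizeof(pr));
	 *	strlcpy(pr.path, "some/anchor", sizeof(pr.path));
	 *	if (ioctl(dev, DIOCGETRULESETS, &pr) == -1)
	 *		err(1, "DIOCGETRULESETS");
	 *	n = pr.nr;
	 *	for (i = 0; i < n; i++) {
	 *		pr.nr = i;
	 *		if (ioctl(dev, DIOCGETRULESET, &pr) == -1)
	 *			err(1, "DIOCGETRULESET");
	 *		printf("%s\n", pr.name);
	 *	}
	 */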

	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		break;
	}

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
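
	/*
	 * The table ioctls above and below share one buffer
	 * convention: the caller describes its element array with
	 * pfrio_buffer, pfrio_size (element count) and pfrio_esize
	 * (element size).  pfrio_esize is checked against the kernel's
	 * structure size so a userland binary built against mismatched
	 * headers fails cleanly with ENODEV instead of having its
	 * buffer misinterpreted.
	 */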

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}
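
	/*
	 * Illustrative userland sketch (not part of this file): the
	 * DIOCX* ioctls below implement an all-or-nothing transaction
	 * over multiple rulesets.  A loader builds an array of
	 * pfioc_trans_e entries, obtains one ticket per ruleset via
	 * DIOCXBEGIN, loads the inactive sets against those tickets,
	 * and then either commits or rolls back:
	 *
	 *	struct pfioc_trans io;
	 *	struct pfioc_trans_e ioe[1];
	 *
	 *	memset(&io, 0, sizeof(io));
	 *	memset(ioe, 0, sizeof(ioe));
	 *	ioe[0].rs_num = PF_RULESET_FILTER;
	 *	io.size = 1;
	 *	io.esize = sizeof(ioe[0]);
	 *	io.array = ioe;
	 *	if (ioctl(dev, DIOCXBEGIN, &io) == -1)
	 *		err(1, "DIOCXBEGIN");
	 *	(load rules against ioe[0].ticket here)
	 *	if (ioctl(dev, DIOCXCOMMIT, &io) == -1)
	 *		err(1, "DIOCXCOMMIT");
	 *
	 * On a load error, the caller issues DIOCXROLLBACK with the
	 * same array instead of DIOCXCOMMIT.
	 */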

	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
		    M_TEMP, M_WAITOK);
		table = (struct pfr_table *)malloc(sizeof(*table),
		    M_TEMP, M_WAITOK);
		/* first pass: make sure everything will succeed */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe->ticket !=
				    ticket_altqs_inactive) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* second pass: do the commit; no errors should happen here */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *p, *pstore;
		u_int32_t nr = 0;
		int space = psn->psn_len;

		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int secs = time_second, diff;

			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
				break;

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				free(pstore, M_TEMP);
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		free(pstore, M_TEMP);
		break;
	}
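
	/*
	 * Note: DIOCGETSRCNODES above uses the usual two-call sizing
	 * protocol: a first call with psn_len == 0 only reports the
	 * buffer space needed; the caller then allocates a buffer,
	 * sets psn_len, and repeats the call, and the kernel copies
	 * out at most that many entries and writes back the length
	 * actually used.
	 */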

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes(1);
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes(1);

		/* XXX the kill count is returned in the psnk_af field */
		psnk->psnk_af = killed;
		break;
	}

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		if (*hostid == 0)
			pf_status.hostid = arc4random();
		else
			pf_status.hostid = *hostid;
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	splx(s);
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
	return (error);
}
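
/*
 * The wrappers below adapt pf to NetBSD's pfil(9) packet filter hook
 * interface: pfil passes a struct mbuf ** and a direction, the wrapper
 * normalizes the packet (writability, delayed checksums) and translates
 * pf_test()'s PF_PASS/PF_DROP verdict into the 0/errno convention that
 * pfil expects, freeing the mbuf on a drop.
 */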

#ifdef __NetBSD__
#ifdef INET
static int
pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * Ensure that mbufs are writable beforehand, as the pf code
	 * assumes it can modify them in place.
	 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}
	}

	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
	    != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	}

	/*
	 * We're not compatible with fast-forward, so clear the flag
	 * on packets we accept in-bound.
	 */
	if (dir == PFIL_IN && *mp) {
		(*mp)->m_flags &= ~M_CANFASTFWD;
	}

	return (0);
}
#endif /* INET */

#ifdef INET6
static int
pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * Ensure that mbufs are writable beforehand, as the pf code
	 * assumes it can modify them in place.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
		}
	}

	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
	    != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	} else
		return (0);
}
#endif /* INET6 */

static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif /* INET6 */
	int error;

	if (pf_pfil_attached)
		return (EBUSY);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		return (error);

#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad;
#endif /* INET6 */

	pf_pfil_attached = 1;

	return (0);

#ifdef INET6
bad:
	pfil_remove_hook((void *)pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT,
	    ph_inet);
#endif /* INET6 */

	return (error);
}

static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif /* INET6 */

	if (pf_pfil_attached == 0)
		return (EBUSY);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif /* INET6 */
	pf_pfil_attached = 0;

	return (0);
}
#endif /* __NetBSD__ */