1 /* $OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */ 2 3 /* 4 * Copyright (c) 2010 The DragonFly Project. All rights reserved. 5 * 6 * Copyright (c) 2001 Daniel Hartmeier 7 * Copyright (c) 2002,2003 Henning Brauer 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * - Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * - Redistributions in binary form must reproduce the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer in the documentation and/or other materials provided 19 * with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 * 34 * Effort sponsored in part by the Defense Advanced Research Projects 35 * Agency (DARPA) and Air Force Research Laboratory, Air Force 36 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
37 * 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_inet6.h" 42 #include "use_pfsync.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/conf.h> 47 #include <sys/device.h> 48 #include <sys/mbuf.h> 49 #include <sys/filio.h> 50 #include <sys/fcntl.h> 51 #include <sys/socket.h> 52 #include <sys/socketvar.h> 53 #include <sys/kernel.h> 54 #include <sys/kthread.h> 55 #include <sys/time.h> 56 #include <sys/proc.h> 57 #include <sys/malloc.h> 58 #include <sys/module.h> 59 #include <vm/vm_zone.h> 60 #include <sys/lock.h> 61 62 #include <sys/thread2.h> 63 #include <sys/mplock2.h> 64 65 #include <net/if.h> 66 #include <net/if_types.h> 67 #include <net/route.h> 68 69 #include <netinet/in.h> 70 #include <netinet/in_var.h> 71 #include <netinet/in_systm.h> 72 #include <netinet/ip.h> 73 #include <netinet/ip_var.h> 74 #include <netinet/ip_icmp.h> 75 76 #include <net/pf/pfvar.h> 77 #include <sys/md5.h> 78 #include <net/pf/pfvar.h> 79 80 #if NPFSYNC > 0 81 #include <net/pf/if_pfsync.h> 82 #endif /* NPFSYNC > 0 */ 83 84 #if NPFLOG > 0 85 #include <net/if_pflog.h> 86 #endif /* NPFLOG > 0 */ 87 88 #ifdef INET6 89 #include <netinet/ip6.h> 90 #include <netinet/in_pcb.h> 91 #endif /* INET6 */ 92 93 #ifdef ALTQ 94 #include <net/altq/altq.h> 95 #endif 96 97 #include <machine/limits.h> 98 #include <net/pfil.h> 99 #include <sys/mutex.h> 100 101 u_int rt_numfibs = RT_NUMFIBS; 102 103 void init_zone_var(void); 104 void cleanup_pf_zone(void); 105 int pfattach(void); 106 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 107 u_int8_t, u_int8_t, u_int8_t); 108 109 void pf_mv_pool(struct pf_palist *, struct pf_palist *); 110 void pf_empty_pool(struct pf_palist *); 111 #ifdef ALTQ 112 int pf_begin_altq(u_int32_t *); 113 int pf_rollback_altq(u_int32_t); 114 int pf_commit_altq(u_int32_t); 115 int pf_enable_altq(struct pf_altq *); 116 int pf_disable_altq(struct pf_altq *); 117 #endif /* ALTQ */ 118 int pf_begin_rules(u_int32_t *, int, const char *); 119 int 
pf_rollback_rules(u_int32_t, int, char *); 120 int pf_setup_pfsync_matching(struct pf_ruleset *); 121 void pf_hash_rule(MD5_CTX *, struct pf_rule *); 122 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 123 int pf_commit_rules(u_int32_t, int, char *); 124 void pf_state_export(struct pfsync_state *, 125 struct pf_state_key *, struct pf_state *); 126 void pf_state_import(struct pfsync_state *, 127 struct pf_state_key *, struct pf_state *); 128 129 struct pf_rule pf_default_rule; 130 struct lock pf_consistency_lock; 131 #ifdef ALTQ 132 static int pf_altq_running; 133 #endif 134 135 #define TAGID_MAX 50000 136 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 137 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 138 139 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 140 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 141 #endif 142 u_int16_t tagname2tag(struct pf_tags *, char *); 143 void tag2tagname(struct pf_tags *, u_int16_t, char *); 144 void tag_unref(struct pf_tags *, u_int16_t); 145 int pf_rtlabel_add(struct pf_addr_wrap *); 146 void pf_rtlabel_remove(struct pf_addr_wrap *); 147 void pf_rtlabel_copyout(struct pf_addr_wrap *); 148 149 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x 150 151 static cdev_t pf_dev; 152 153 /* 154 * XXX - These are new and need to be checked when moveing to a new version 155 */ 156 static void pf_clear_states(void); 157 static int pf_clear_tables(void); 158 static void pf_clear_srcnodes(void); 159 /* 160 * XXX - These are new and need to be checked when moveing to a new version 161 */ 162 163 /* 164 * Wrapper functions for pfil(9) hooks 165 */ 166 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, 167 int dir); 168 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, 169 int dir); 170 #ifdef INET6 171 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, 172 int dir); 173 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet 
*ifp, 174 int dir); 175 #endif 176 177 static int hook_pf(void); 178 static int dehook_pf(void); 179 static int shutdown_pf(void); 180 static int pf_load(void); 181 static int pf_unload(void); 182 183 d_open_t pfopen; 184 d_close_t pfclose; 185 d_ioctl_t pfioctl; 186 187 static struct dev_ops pf_ops = { /* XXX convert to port model */ 188 { PF_NAME, 73, 0 }, 189 .d_open = pfopen, 190 .d_close = pfclose, 191 .d_ioctl = pfioctl 192 }; 193 194 static volatile int pf_pfil_hooked = 0; 195 int pf_end_threads = 0; 196 struct lock pf_mod_lck; 197 198 int debug_pfugidhack = 0; 199 SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0, 200 "Enable/disable pf user/group rules mpsafe hack"); 201 202 void 203 init_zone_var(void) 204 { 205 pf_src_tree_pl = pf_rule_pl = NULL; 206 pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL; 207 pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL; 208 pf_state_scrub_pl = NULL; 209 pfr_ktable_pl = pfr_kentry_pl = NULL; 210 } 211 212 void 213 cleanup_pf_zone(void) 214 { 215 ZONE_DESTROY(pf_src_tree_pl); 216 ZONE_DESTROY(pf_rule_pl); 217 ZONE_DESTROY(pf_state_pl); 218 ZONE_DESTROY(pf_altq_pl); 219 ZONE_DESTROY(pf_pooladdr_pl); 220 ZONE_DESTROY(pf_frent_pl); 221 ZONE_DESTROY(pf_frag_pl); 222 ZONE_DESTROY(pf_cache_pl); 223 ZONE_DESTROY(pf_cent_pl); 224 ZONE_DESTROY(pfr_ktable_pl); 225 ZONE_DESTROY(pfr_kentry_pl); 226 ZONE_DESTROY(pfr_kentry_pl2); 227 ZONE_DESTROY(pf_state_scrub_pl); 228 ZONE_DESTROY(pfi_addr_pl); 229 } 230 231 int 232 pfattach(void) 233 { 234 u_int32_t *my_timeout = pf_default_rule.timeout; 235 int error = 1; 236 237 do { 238 ZONE_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl"); 239 ZONE_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl"); 240 ZONE_CREATE(pf_state_pl, struct pf_state, "pfstatepl"); 241 ZONE_CREATE(pf_state_key_pl, struct pf_state_key, "pfstatekeypl"); 242 ZONE_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl"); 243 ZONE_CREATE(pf_pooladdr_pl,struct pf_pooladdr, "pfpooladdrpl"); 244 
ZONE_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable"); 245 ZONE_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry"); 246 ZONE_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2"); 247 ZONE_CREATE(pf_frent_pl, struct pf_frent, "pffrent"); 248 ZONE_CREATE(pf_frag_pl, struct pf_fragment, "pffrag"); 249 ZONE_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache"); 250 ZONE_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent"); 251 ZONE_CREATE(pf_state_scrub_pl, struct pf_state_scrub, 252 "pfstatescrub"); 253 ZONE_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl"); 254 error = 0; 255 } while(0); 256 if (error) { 257 cleanup_pf_zone(); 258 return (error); 259 } 260 pfr_initialize(); 261 pfi_initialize(); 262 error = pf_osfp_initialize(); 263 if (error) { 264 cleanup_pf_zone(); 265 pf_osfp_cleanup(); 266 return (error); 267 } 268 269 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl; 270 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 271 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl; 272 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT; 273 /* XXX uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp, 274 pf_pool_limits[PF_LIMIT_STATES].limit); 275 */ 276 if (ctob(physmem) <= 100*1024*1024) 277 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = 278 PFR_KENTRY_HIWAT_SMALL; 279 RB_INIT(&tree_src_tracking); 280 RB_INIT(&pf_anchors); 281 pf_init_ruleset(&pf_main_ruleset); 282 TAILQ_INIT(&pf_altqs[0]); 283 TAILQ_INIT(&pf_altqs[1]); 284 TAILQ_INIT(&pf_pabuf); 285 pf_altqs_active = &pf_altqs[0]; 286 pf_altqs_inactive = &pf_altqs[1]; 287 TAILQ_INIT(&state_list); 288 289 /* default rule should never be garbage collected */ 290 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 291 pf_default_rule.action = PF_PASS; 292 pf_default_rule.nr = (uint32_t)(-1); 293 pf_default_rule.rtableid = -1; 294 295 /* initialize default timeouts */ 296 my_timeout[PFTM_TCP_FIRST_PACKET] = 120; /* First TCP packet */ 297 my_timeout[PFTM_TCP_OPENING] = 30; /* No 
response yet */ 298 my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60; /* Established */ 299 my_timeout[PFTM_TCP_CLOSING] = 15 * 60; /* Half closed */ 300 my_timeout[PFTM_TCP_FIN_WAIT] = 45; /* Got both FINs */ 301 my_timeout[PFTM_TCP_CLOSED] = 90; /* Got a RST */ 302 my_timeout[PFTM_UDP_FIRST_PACKET] = 60; /* First UDP packet */ 303 my_timeout[PFTM_UDP_SINGLE] = 30; /* Unidirectional */ 304 my_timeout[PFTM_UDP_MULTIPLE] = 60; /* Bidirectional */ 305 my_timeout[PFTM_ICMP_FIRST_PACKET] = 20; /* First ICMP packet */ 306 my_timeout[PFTM_ICMP_ERROR_REPLY] = 10; /* Got error response */ 307 my_timeout[PFTM_OTHER_FIRST_PACKET] = 60; /* First packet */ 308 my_timeout[PFTM_OTHER_SINGLE] = 30; /* Unidirectional */ 309 my_timeout[PFTM_OTHER_MULTIPLE] = 60; /* Bidirectional */ 310 my_timeout[PFTM_FRAG] = 30; /* Fragment expire */ 311 my_timeout[PFTM_INTERVAL] = 10; /* Expire interval */ 312 my_timeout[PFTM_SRC_NODE] = 0; /* Source Tracking */ 313 my_timeout[PFTM_TS_DIFF] = 30; /* Allowed TS diff */ 314 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 315 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 316 317 pf_normalize_init(); 318 bzero(&pf_status, sizeof(pf_status)); 319 pf_status.debug = PF_DEBUG_URGENT; 320 321 /* XXX do our best to avoid a conflict */ 322 pf_status.hostid = karc4random(); 323 324 if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge")) 325 panic("pfpurge thread"); 326 327 return (error); 328 } 329 330 int 331 pfopen(struct dev_open_args *ap) 332 { 333 cdev_t dev = ap->a_head.a_dev; 334 if (minor(dev) >= 1) 335 return (ENXIO); 336 return (0); 337 } 338 339 int 340 pfclose(struct dev_close_args *ap) 341 { 342 cdev_t dev = ap->a_head.a_dev; 343 if (minor(dev) >= 1) 344 return (ENXIO); 345 return (0); 346 } 347 348 struct pf_pool * 349 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action, 350 u_int32_t rule_number, u_int8_t r_last, u_int8_t active, 351 u_int8_t check_ticket) 352 { 353 struct pf_ruleset *ruleset; 354 struct pf_rule *rule; 
355 int rs_num; 356 357 ruleset = pf_find_ruleset(anchor); 358 if (ruleset == NULL) 359 return (NULL); 360 rs_num = pf_get_ruleset_number(rule_action); 361 if (rs_num >= PF_RULESET_MAX) 362 return (NULL); 363 if (active) { 364 if (check_ticket && ticket != 365 ruleset->rules[rs_num].active.ticket) 366 return (NULL); 367 if (r_last) 368 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 369 pf_rulequeue); 370 else 371 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 372 } else { 373 if (check_ticket && ticket != 374 ruleset->rules[rs_num].inactive.ticket) 375 return (NULL); 376 if (r_last) 377 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 378 pf_rulequeue); 379 else 380 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); 381 } 382 if (!r_last) { 383 while ((rule != NULL) && (rule->nr != rule_number)) 384 rule = TAILQ_NEXT(rule, entries); 385 } 386 if (rule == NULL) 387 return (NULL); 388 389 return (&rule->rpool); 390 } 391 392 void 393 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) 394 { 395 struct pf_pooladdr *mv_pool_pa; 396 397 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { 398 TAILQ_REMOVE(poola, mv_pool_pa, entries); 399 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); 400 } 401 } 402 403 void 404 pf_empty_pool(struct pf_palist *poola) 405 { 406 struct pf_pooladdr *empty_pool_pa; 407 408 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) { 409 pfi_dynaddr_remove(&empty_pool_pa->addr); 410 pf_tbladdr_remove(&empty_pool_pa->addr); 411 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE); 412 TAILQ_REMOVE(poola, empty_pool_pa, entries); 413 pool_put(&pf_pooladdr_pl, empty_pool_pa); 414 } 415 } 416 417 void 418 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) 419 { 420 if (rulequeue != NULL) { 421 if (rule->states <= 0) { 422 /* 423 * XXX - we need to remove the table *before* detaching 424 * the rule to make sure the table code does not delete 425 * the anchor under our feet. 
426 */ 427 pf_tbladdr_remove(&rule->src.addr); 428 pf_tbladdr_remove(&rule->dst.addr); 429 if (rule->overload_tbl) 430 pfr_detach_table(rule->overload_tbl); 431 } 432 TAILQ_REMOVE(rulequeue, rule, entries); 433 rule->entries.tqe_prev = NULL; 434 rule->nr = -1; 435 } 436 437 if (rule->states > 0 || rule->src_nodes > 0 || 438 rule->entries.tqe_prev != NULL) 439 return; 440 pf_tag_unref(rule->tag); 441 pf_tag_unref(rule->match_tag); 442 #ifdef ALTQ 443 if (rule->pqid != rule->qid) 444 pf_qid_unref(rule->pqid); 445 pf_qid_unref(rule->qid); 446 #endif 447 pf_rtlabel_remove(&rule->src.addr); 448 pf_rtlabel_remove(&rule->dst.addr); 449 pfi_dynaddr_remove(&rule->src.addr); 450 pfi_dynaddr_remove(&rule->dst.addr); 451 if (rulequeue == NULL) { 452 pf_tbladdr_remove(&rule->src.addr); 453 pf_tbladdr_remove(&rule->dst.addr); 454 if (rule->overload_tbl) 455 pfr_detach_table(rule->overload_tbl); 456 } 457 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE); 458 pf_anchor_remove(rule); 459 pf_empty_pool(&rule->rpool.list); 460 pool_put(&pf_rule_pl, rule); 461 } 462 463 u_int16_t 464 tagname2tag(struct pf_tags *head, char *tagname) 465 { 466 struct pf_tagname *tag, *p = NULL; 467 u_int16_t new_tagid = 1; 468 469 TAILQ_FOREACH(tag, head, entries) 470 if (strcmp(tagname, tag->name) == 0) { 471 tag->ref++; 472 return (tag->tag); 473 } 474 475 /* 476 * to avoid fragmentation, we do a linear search from the beginning 477 * and take the first free slot we find. if there is none or the list 478 * is empty, append a new entry at the end. 
479 */ 480 481 /* new entry */ 482 if (!TAILQ_EMPTY(head)) 483 for (p = TAILQ_FIRST(head); p != NULL && 484 p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) 485 new_tagid = p->tag + 1; 486 487 if (new_tagid > TAGID_MAX) 488 return (0); 489 490 /* allocate and fill new struct pf_tagname */ 491 tag = kmalloc(sizeof(struct pf_tagname), M_TEMP, M_WAITOK); 492 if (tag == NULL) 493 return (0); 494 bzero(tag, sizeof(struct pf_tagname)); 495 strlcpy(tag->name, tagname, sizeof(tag->name)); 496 tag->tag = new_tagid; 497 tag->ref++; 498 499 if (p != NULL) /* insert new entry before p */ 500 TAILQ_INSERT_BEFORE(p, tag, entries); 501 else /* either list empty or no free slot in between */ 502 TAILQ_INSERT_TAIL(head, tag, entries); 503 504 return (tag->tag); 505 } 506 507 void 508 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) 509 { 510 struct pf_tagname *tag; 511 512 TAILQ_FOREACH(tag, head, entries) 513 if (tag->tag == tagid) { 514 strlcpy(p, tag->name, PF_TAG_NAME_SIZE); 515 return; 516 } 517 } 518 519 void 520 tag_unref(struct pf_tags *head, u_int16_t tag) 521 { 522 struct pf_tagname *p, *next; 523 524 if (tag == 0) 525 return; 526 527 for (p = TAILQ_FIRST(head); p != NULL; p = next) { 528 next = TAILQ_NEXT(p, entries); 529 if (tag == p->tag) { 530 if (--p->ref == 0) { 531 TAILQ_REMOVE(head, p, entries); 532 kfree(p, M_TEMP); 533 } 534 break; 535 } 536 } 537 } 538 539 u_int16_t 540 pf_tagname2tag(char *tagname) 541 { 542 return (tagname2tag(&pf_tags, tagname)); 543 } 544 545 void 546 pf_tag2tagname(u_int16_t tagid, char *p) 547 { 548 tag2tagname(&pf_tags, tagid, p); 549 } 550 551 void 552 pf_tag_ref(u_int16_t tag) 553 { 554 struct pf_tagname *t; 555 556 TAILQ_FOREACH(t, &pf_tags, entries) 557 if (t->tag == tag) 558 break; 559 if (t != NULL) 560 t->ref++; 561 } 562 563 void 564 pf_tag_unref(u_int16_t tag) 565 { 566 tag_unref(&pf_tags, tag); 567 } 568 569 int 570 pf_rtlabel_add(struct pf_addr_wrap *a) 571 { 572 return (0); 573 } 574 575 void 576 
pf_rtlabel_remove(struct pf_addr_wrap *a) 577 { 578 } 579 580 void 581 pf_rtlabel_copyout(struct pf_addr_wrap *a) 582 { 583 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) 584 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname)); 585 } 586 587 #ifdef ALTQ 588 u_int32_t 589 pf_qname2qid(char *qname) 590 { 591 return ((u_int32_t)tagname2tag(&pf_qids, qname)); 592 } 593 594 void 595 pf_qid2qname(u_int32_t qid, char *p) 596 { 597 tag2tagname(&pf_qids, (u_int16_t)qid, p); 598 } 599 600 void 601 pf_qid_unref(u_int32_t qid) 602 { 603 tag_unref(&pf_qids, (u_int16_t)qid); 604 } 605 606 int 607 pf_begin_altq(u_int32_t *ticket) 608 { 609 struct pf_altq *altq; 610 int error = 0; 611 612 /* Purge the old altq list */ 613 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 614 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 615 if (altq->qname[0] == 0) { 616 /* detach and destroy the discipline */ 617 error = altq_remove(altq); 618 } else 619 pf_qid_unref(altq->qid); 620 pool_put(&pf_altq_pl, altq); 621 } 622 if (error) 623 return (error); 624 *ticket = ++ticket_altqs_inactive; 625 altqs_inactive_open = 1; 626 return (0); 627 } 628 629 int 630 pf_rollback_altq(u_int32_t ticket) 631 { 632 struct pf_altq *altq; 633 int error = 0; 634 635 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 636 return (0); 637 /* Purge the old altq list */ 638 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 639 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 640 if (altq->qname[0] == 0) { 641 /* detach and destroy the discipline */ 642 error = altq_remove(altq); 643 } else 644 pf_qid_unref(altq->qid); 645 pool_put(&pf_altq_pl, altq); 646 } 647 altqs_inactive_open = 0; 648 return (error); 649 } 650 651 int 652 pf_commit_altq(u_int32_t ticket) 653 { 654 struct pf_altqqueue *old_altqs; 655 struct pf_altq *altq; 656 int err, error = 0; 657 658 if (!altqs_inactive_open || ticket != ticket_altqs_inactive) 659 return (EBUSY); 660 661 /* swap altqs, keep the old. 
*/ 662 crit_enter(); 663 old_altqs = pf_altqs_active; 664 pf_altqs_active = pf_altqs_inactive; 665 pf_altqs_inactive = old_altqs; 666 ticket_altqs_active = ticket_altqs_inactive; 667 668 /* Attach new disciplines */ 669 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 670 if (altq->qname[0] == 0) { 671 /* attach the discipline */ 672 error = altq_pfattach(altq); 673 if (error) { 674 crit_exit(); 675 return (error); 676 } 677 } 678 } 679 680 /* Purge the old altq list */ 681 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { 682 TAILQ_REMOVE(pf_altqs_inactive, altq, entries); 683 if (altq->qname[0] == 0) { 684 /* detach and destroy the discipline */ 685 if (pf_altq_running) 686 error = pf_disable_altq(altq); 687 err = altq_pfdetach(altq); 688 if (err != 0 && error == 0) 689 error = err; 690 err = altq_remove(altq); 691 if (err != 0 && error == 0) 692 error = err; 693 } else 694 pf_qid_unref(altq->qid); 695 pool_put(&pf_altq_pl, altq); 696 } 697 crit_exit(); 698 699 altqs_inactive_open = 0; 700 return (error); 701 } 702 703 int 704 pf_enable_altq(struct pf_altq *altq) 705 { 706 struct ifnet *ifp; 707 struct tb_profile tb; 708 int error = 0; 709 710 if ((ifp = ifunit(altq->ifname)) == NULL) 711 return (EINVAL); 712 713 if (ifp->if_snd.altq_type != ALTQT_NONE) 714 error = altq_enable(&ifp->if_snd); 715 716 /* set tokenbucket regulator */ 717 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { 718 tb.rate = altq->ifbandwidth; 719 tb.depth = altq->tbrsize; 720 crit_enter(); 721 error = tbr_set(&ifp->if_snd, &tb); 722 crit_exit(); 723 } 724 725 return (error); 726 } 727 728 int 729 pf_disable_altq(struct pf_altq *altq) 730 { 731 struct ifnet *ifp; 732 struct tb_profile tb; 733 int error; 734 735 if ((ifp = ifunit(altq->ifname)) == NULL) 736 return (EINVAL); 737 738 /* 739 * when the discipline is no longer referenced, it was overridden 740 * by a new one. if so, just return. 
741 */ 742 if (altq->altq_disc != ifp->if_snd.altq_disc) 743 return (0); 744 745 error = altq_disable(&ifp->if_snd); 746 747 if (error == 0) { 748 /* clear tokenbucket regulator */ 749 tb.rate = 0; 750 crit_enter(); 751 error = tbr_set(&ifp->if_snd, &tb); 752 crit_exit(); 753 } 754 755 return (error); 756 } 757 #endif /* ALTQ */ 758 759 int 760 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 761 { 762 struct pf_ruleset *rs; 763 struct pf_rule *rule; 764 765 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 766 return (EINVAL); 767 rs = pf_find_or_create_ruleset(anchor); 768 if (rs == NULL) 769 return (EINVAL); 770 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 771 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); 772 rs->rules[rs_num].inactive.rcount--; 773 } 774 *ticket = ++rs->rules[rs_num].inactive.ticket; 775 rs->rules[rs_num].inactive.open = 1; 776 return (0); 777 } 778 779 int 780 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) 781 { 782 struct pf_ruleset *rs; 783 struct pf_rule *rule; 784 785 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 786 return (EINVAL); 787 rs = pf_find_ruleset(anchor); 788 if (rs == NULL || !rs->rules[rs_num].inactive.open || 789 rs->rules[rs_num].inactive.ticket != ticket) 790 return (0); 791 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 792 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); 793 rs->rules[rs_num].inactive.rcount--; 794 } 795 rs->rules[rs_num].inactive.open = 0; 796 return (0); 797 } 798 799 #define PF_MD5_UPD(st, elm) \ 800 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) 801 802 #define PF_MD5_UPD_STR(st, elm) \ 803 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) 804 805 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ 806 (stor) = htonl((st)->elm); \ 807 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ 808 } while (0) 809 810 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ 811 (stor) = htons((st)->elm); \ 812 
MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ 813 } while (0) 814 815 void 816 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) 817 { 818 PF_MD5_UPD(pfr, addr.type); 819 switch (pfr->addr.type) { 820 case PF_ADDR_DYNIFTL: 821 PF_MD5_UPD(pfr, addr.v.ifname); 822 PF_MD5_UPD(pfr, addr.iflags); 823 break; 824 case PF_ADDR_TABLE: 825 PF_MD5_UPD(pfr, addr.v.tblname); 826 break; 827 case PF_ADDR_ADDRMASK: 828 /* XXX ignore af? */ 829 PF_MD5_UPD(pfr, addr.v.a.addr.addr32); 830 PF_MD5_UPD(pfr, addr.v.a.mask.addr32); 831 break; 832 case PF_ADDR_RTLABEL: 833 PF_MD5_UPD(pfr, addr.v.rtlabelname); 834 break; 835 } 836 837 PF_MD5_UPD(pfr, port[0]); 838 PF_MD5_UPD(pfr, port[1]); 839 PF_MD5_UPD(pfr, neg); 840 PF_MD5_UPD(pfr, port_op); 841 } 842 843 void 844 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule) 845 { 846 u_int16_t x; 847 u_int32_t y; 848 849 pf_hash_rule_addr(ctx, &rule->src); 850 pf_hash_rule_addr(ctx, &rule->dst); 851 PF_MD5_UPD_STR(rule, label); 852 PF_MD5_UPD_STR(rule, ifname); 853 PF_MD5_UPD_STR(rule, match_tagname); 854 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? 
*/ 855 PF_MD5_UPD_HTONL(rule, os_fingerprint, y); 856 PF_MD5_UPD_HTONL(rule, prob, y); 857 PF_MD5_UPD_HTONL(rule, uid.uid[0], y); 858 PF_MD5_UPD_HTONL(rule, uid.uid[1], y); 859 PF_MD5_UPD(rule, uid.op); 860 PF_MD5_UPD_HTONL(rule, gid.gid[0], y); 861 PF_MD5_UPD_HTONL(rule, gid.gid[1], y); 862 PF_MD5_UPD(rule, gid.op); 863 PF_MD5_UPD_HTONL(rule, rule_flag, y); 864 PF_MD5_UPD(rule, action); 865 PF_MD5_UPD(rule, direction); 866 PF_MD5_UPD(rule, af); 867 PF_MD5_UPD(rule, quick); 868 PF_MD5_UPD(rule, ifnot); 869 PF_MD5_UPD(rule, match_tag_not); 870 PF_MD5_UPD(rule, natpass); 871 PF_MD5_UPD(rule, keep_state); 872 PF_MD5_UPD(rule, proto); 873 PF_MD5_UPD(rule, type); 874 PF_MD5_UPD(rule, code); 875 PF_MD5_UPD(rule, flags); 876 PF_MD5_UPD(rule, flagset); 877 PF_MD5_UPD(rule, allow_opts); 878 PF_MD5_UPD(rule, rt); 879 PF_MD5_UPD(rule, tos); 880 } 881 882 int 883 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) 884 { 885 struct pf_ruleset *rs; 886 struct pf_rule *rule, **old_array; 887 struct pf_rulequeue *old_rules; 888 int error; 889 u_int32_t old_rcount; 890 891 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 892 return (EINVAL); 893 rs = pf_find_ruleset(anchor); 894 if (rs == NULL || !rs->rules[rs_num].inactive.open || 895 ticket != rs->rules[rs_num].inactive.ticket) 896 return (EBUSY); 897 898 /* Calculate checksum for the main ruleset */ 899 if (rs == &pf_main_ruleset) { 900 error = pf_setup_pfsync_matching(rs); 901 if (error != 0) 902 return (error); 903 } 904 905 /* Swap rules, keep the old. 
*/ 906 crit_enter(); 907 old_rules = rs->rules[rs_num].active.ptr; 908 old_rcount = rs->rules[rs_num].active.rcount; 909 old_array = rs->rules[rs_num].active.ptr_array; 910 911 rs->rules[rs_num].active.ptr = 912 rs->rules[rs_num].inactive.ptr; 913 rs->rules[rs_num].active.ptr_array = 914 rs->rules[rs_num].inactive.ptr_array; 915 rs->rules[rs_num].active.rcount = 916 rs->rules[rs_num].inactive.rcount; 917 rs->rules[rs_num].inactive.ptr = old_rules; 918 rs->rules[rs_num].inactive.ptr_array = old_array; 919 rs->rules[rs_num].inactive.rcount = old_rcount; 920 921 rs->rules[rs_num].active.ticket = 922 rs->rules[rs_num].inactive.ticket; 923 pf_calc_skip_steps(rs->rules[rs_num].active.ptr); 924 925 926 /* Purge the old rule list. */ 927 while ((rule = TAILQ_FIRST(old_rules)) != NULL) 928 pf_rm_rule(old_rules, rule); 929 if (rs->rules[rs_num].inactive.ptr_array) 930 kfree(rs->rules[rs_num].inactive.ptr_array, M_TEMP); 931 rs->rules[rs_num].inactive.ptr_array = NULL; 932 rs->rules[rs_num].inactive.rcount = 0; 933 rs->rules[rs_num].inactive.open = 0; 934 pf_remove_if_empty_ruleset(rs); 935 crit_exit(); 936 return (0); 937 } 938 939 void 940 pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk, 941 struct pf_state *s) 942 { 943 int secs = time_second; 944 bzero(sp, sizeof(struct pfsync_state)); 945 946 /* copy from state key */ 947 sp->lan.addr = sk->lan.addr; 948 sp->lan.port = sk->lan.port; 949 sp->gwy.addr = sk->gwy.addr; 950 sp->gwy.port = sk->gwy.port; 951 sp->ext.addr = sk->ext.addr; 952 sp->ext.port = sk->ext.port; 953 sp->proto = sk->proto; 954 sp->af = sk->af; 955 sp->direction = sk->direction; 956 957 /* copy from state */ 958 memcpy(&sp->id, &s->id, sizeof(sp->id)); 959 sp->creatorid = s->creatorid; 960 strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname)); 961 pf_state_peer_to_pfsync(&s->src, &sp->src); 962 pf_state_peer_to_pfsync(&s->dst, &sp->dst); 963 964 sp->rule = s->rule.ptr->nr; 965 sp->nat_rule = (s->nat_rule.ptr == NULL) ? 
-1 : s->nat_rule.ptr->nr; 966 sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr; 967 968 pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]); 969 pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]); 970 pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]); 971 pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]); 972 sp->creation = secs - s->creation; 973 sp->expire = pf_state_expires(s); 974 sp->log = s->log; 975 sp->allow_opts = s->allow_opts; 976 sp->timeout = s->timeout; 977 978 if (s->src_node) 979 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 980 if (s->nat_src_node) 981 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 982 983 if (sp->expire > secs) 984 sp->expire -= secs; 985 else 986 sp->expire = 0; 987 988 } 989 990 void 991 pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk, 992 struct pf_state *s) 993 { 994 /* copy to state key */ 995 sk->lan.addr = sp->lan.addr; 996 sk->lan.port = sp->lan.port; 997 sk->gwy.addr = sp->gwy.addr; 998 sk->gwy.port = sp->gwy.port; 999 sk->ext.addr = sp->ext.addr; 1000 sk->ext.port = sp->ext.port; 1001 sk->proto = sp->proto; 1002 sk->af = sp->af; 1003 sk->direction = sp->direction; 1004 1005 /* copy to state */ 1006 memcpy(&s->id, &sp->id, sizeof(sp->id)); 1007 s->creatorid = sp->creatorid; 1008 strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname)); 1009 pf_state_peer_from_pfsync(&sp->src, &s->src); 1010 pf_state_peer_from_pfsync(&sp->dst, &s->dst); 1011 1012 s->rule.ptr = &pf_default_rule; 1013 s->nat_rule.ptr = NULL; 1014 s->anchor.ptr = NULL; 1015 s->rt_kif = NULL; 1016 s->creation = time_second; 1017 s->pfsync_time = 0; 1018 s->packets[0] = s->packets[1] = 0; 1019 s->bytes[0] = s->bytes[1] = 0; 1020 } 1021 1022 int 1023 pf_setup_pfsync_matching(struct pf_ruleset *rs) 1024 { 1025 MD5_CTX ctx; 1026 struct pf_rule *rule; 1027 int rs_cnt; 1028 u_int8_t digest[PF_MD5_DIGEST_LENGTH]; 1029 1030 MD5Init(&ctx); 1031 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) { 1032 /* XXX PF_RULESET_SCRUB as 
well? */ 1033 if (rs_cnt == PF_RULESET_SCRUB) 1034 continue; 1035 1036 if (rs->rules[rs_cnt].inactive.ptr_array) 1037 kfree(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); 1038 rs->rules[rs_cnt].inactive.ptr_array = NULL; 1039 1040 if (rs->rules[rs_cnt].inactive.rcount) { 1041 rs->rules[rs_cnt].inactive.ptr_array = 1042 kmalloc(sizeof(caddr_t) * 1043 rs->rules[rs_cnt].inactive.rcount, 1044 M_TEMP, M_WAITOK); 1045 1046 if (!rs->rules[rs_cnt].inactive.ptr_array) 1047 return (ENOMEM); 1048 } 1049 1050 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, 1051 entries) { 1052 pf_hash_rule(&ctx, rule); 1053 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; 1054 } 1055 } 1056 1057 MD5Final(digest, &ctx); 1058 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum)); 1059 return (0); 1060 } 1061 1062 int 1063 pfioctl(struct dev_ioctl_args *ap) 1064 { 1065 u_long cmd = ap->a_cmd; 1066 caddr_t addr = ap->a_data; 1067 struct pf_pooladdr *pa = NULL; 1068 struct pf_pool *pool = NULL; 1069 int error = 0; 1070 1071 /* XXX keep in sync with switch() below */ 1072 if (securelevel > 1) 1073 switch (cmd) { 1074 case DIOCGETRULES: 1075 case DIOCGETRULE: 1076 case DIOCGETADDRS: 1077 case DIOCGETADDR: 1078 case DIOCGETSTATE: 1079 case DIOCSETSTATUSIF: 1080 case DIOCGETSTATUS: 1081 case DIOCCLRSTATUS: 1082 case DIOCNATLOOK: 1083 case DIOCSETDEBUG: 1084 case DIOCGETSTATES: 1085 case DIOCGETTIMEOUT: 1086 case DIOCCLRRULECTRS: 1087 case DIOCGETLIMIT: 1088 case DIOCGETALTQS: 1089 case DIOCGETALTQ: 1090 case DIOCGETQSTATS: 1091 case DIOCGETRULESETS: 1092 case DIOCGETRULESET: 1093 case DIOCRGETTABLES: 1094 case DIOCRGETTSTATS: 1095 case DIOCRCLRTSTATS: 1096 case DIOCRCLRADDRS: 1097 case DIOCRADDADDRS: 1098 case DIOCRDELADDRS: 1099 case DIOCRSETADDRS: 1100 case DIOCRGETADDRS: 1101 case DIOCRGETASTATS: 1102 case DIOCRCLRASTATS: 1103 case DIOCRTSTADDRS: 1104 case DIOCOSFPGET: 1105 case DIOCGETSRCNODES: 1106 case DIOCCLRSRCNODES: 1107 case DIOCIGETIFACES: 1108 case DIOCSETIFFLAG: 1109 
case DIOCCLRIFFLAG: 1110 case DIOCGIFSPEED: 1111 break; 1112 case DIOCRCLRTABLES: 1113 case DIOCRADDTABLES: 1114 case DIOCRDELTABLES: 1115 case DIOCRSETTFLAGS: 1116 if (((struct pfioc_table *)addr)->pfrio_flags & 1117 PFR_FLAG_DUMMY) 1118 break; /* dummy operation ok */ 1119 return (EPERM); 1120 default: 1121 return (EPERM); 1122 } 1123 1124 if (!(ap->a_fflag & FWRITE)) 1125 switch (cmd) { 1126 case DIOCGETRULES: 1127 case DIOCGETADDRS: 1128 case DIOCGETADDR: 1129 case DIOCGETSTATE: 1130 case DIOCGETSTATUS: 1131 case DIOCGETSTATES: 1132 case DIOCGETTIMEOUT: 1133 case DIOCGETLIMIT: 1134 case DIOCGETALTQS: 1135 case DIOCGETALTQ: 1136 case DIOCGETQSTATS: 1137 case DIOCGETRULESETS: 1138 case DIOCGETRULESET: 1139 case DIOCNATLOOK: 1140 case DIOCRGETTABLES: 1141 case DIOCRGETTSTATS: 1142 case DIOCRGETADDRS: 1143 case DIOCRGETASTATS: 1144 case DIOCRTSTADDRS: 1145 case DIOCOSFPGET: 1146 case DIOCGETSRCNODES: 1147 case DIOCIGETIFACES: 1148 case DIOCGIFSPEED: 1149 break; 1150 case DIOCRCLRTABLES: 1151 case DIOCRADDTABLES: 1152 case DIOCRDELTABLES: 1153 case DIOCRCLRTSTATS: 1154 case DIOCRCLRADDRS: 1155 case DIOCRADDADDRS: 1156 case DIOCRDELADDRS: 1157 case DIOCRSETADDRS: 1158 case DIOCRSETTFLAGS: 1159 if (((struct pfioc_table *)addr)->pfrio_flags & 1160 PFR_FLAG_DUMMY) 1161 break; /* dummy operation ok */ 1162 return (EACCES); 1163 case DIOCGETRULE: 1164 if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR) 1165 return (EACCES); 1166 break; 1167 default: 1168 return (EACCES); 1169 } 1170 1171 switch (cmd) { 1172 1173 case DIOCSTART: 1174 if (pf_status.running) 1175 error = EEXIST; 1176 else { 1177 error = hook_pf(); 1178 if (error) { 1179 DPFPRINTF(PF_DEBUG_MISC, 1180 ("pf: pfil registration fail\n")); 1181 break; 1182 } 1183 pf_status.running = 1; 1184 pf_status.since = time_second; 1185 if (pf_status.stateid == 0) { 1186 pf_status.stateid = time_second; 1187 pf_status.stateid = pf_status.stateid << 32; 1188 } 1189 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1190 } 
1191 break; 1192 1193 case DIOCSTOP: 1194 if (!pf_status.running) 1195 error = ENOENT; 1196 else { 1197 pf_status.running = 0; 1198 error = dehook_pf(); 1199 if (error) { 1200 pf_status.running = 1; 1201 DPFPRINTF(PF_DEBUG_MISC, 1202 ("pf: pfil unregistration failed\n")); 1203 } 1204 pf_status.since = time_second; 1205 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1206 } 1207 break; 1208 1209 case DIOCADDRULE: { 1210 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1211 struct pf_ruleset *ruleset; 1212 struct pf_rule *rule, *tail; 1213 struct pf_pooladdr *pa; 1214 int rs_num; 1215 1216 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1217 ruleset = pf_find_ruleset(pr->anchor); 1218 if (ruleset == NULL) { 1219 error = EINVAL; 1220 break; 1221 } 1222 rs_num = pf_get_ruleset_number(pr->rule.action); 1223 if (rs_num >= PF_RULESET_MAX) { 1224 error = EINVAL; 1225 break; 1226 } 1227 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1228 error = EINVAL; 1229 break; 1230 } 1231 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1232 error = EBUSY; 1233 break; 1234 } 1235 if (pr->pool_ticket != ticket_pabuf) { 1236 error = EBUSY; 1237 break; 1238 } 1239 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1240 if (rule == NULL) { 1241 error = ENOMEM; 1242 break; 1243 } 1244 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1245 rule->cuid = ap->a_cred->cr_ruid; 1246 rule->cpid = (int)NULL; 1247 rule->anchor = NULL; 1248 rule->kif = NULL; 1249 TAILQ_INIT(&rule->rpool.list); 1250 /* initialize refcounting */ 1251 rule->states = 0; 1252 rule->src_nodes = 0; 1253 rule->entries.tqe_prev = NULL; 1254 #ifndef INET 1255 if (rule->af == AF_INET) { 1256 pool_put(&pf_rule_pl, rule); 1257 error = EAFNOSUPPORT; 1258 break; 1259 } 1260 #endif /* INET */ 1261 #ifndef INET6 1262 if (rule->af == AF_INET6) { 1263 pool_put(&pf_rule_pl, rule); 1264 error = EAFNOSUPPORT; 1265 break; 1266 } 1267 #endif /* INET6 */ 1268 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1269 pf_rulequeue); 1270 if (tail) 
1271 rule->nr = tail->nr + 1; 1272 else 1273 rule->nr = 0; 1274 if (rule->ifname[0]) { 1275 rule->kif = pfi_kif_get(rule->ifname); 1276 if (rule->kif == NULL) { 1277 pool_put(&pf_rule_pl, rule); 1278 error = EINVAL; 1279 break; 1280 } 1281 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); 1282 } 1283 1284 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs) 1285 error = EBUSY; 1286 1287 #ifdef ALTQ 1288 /* set queue IDs */ 1289 if (rule->qname[0] != 0) { 1290 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1291 error = EBUSY; 1292 else if (rule->pqname[0] != 0) { 1293 if ((rule->pqid = 1294 pf_qname2qid(rule->pqname)) == 0) 1295 error = EBUSY; 1296 } else 1297 rule->pqid = rule->qid; 1298 } 1299 #endif 1300 if (rule->tagname[0]) 1301 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1302 error = EBUSY; 1303 if (rule->match_tagname[0]) 1304 if ((rule->match_tag = 1305 pf_tagname2tag(rule->match_tagname)) == 0) 1306 error = EBUSY; 1307 if (rule->rt && !rule->direction) 1308 error = EINVAL; 1309 #if NPFLOG > 0 1310 if (!rule->log) 1311 rule->logif = 0; 1312 if (rule->logif >= PFLOGIFS_MAX) 1313 error = EINVAL; 1314 #endif 1315 if (pf_rtlabel_add(&rule->src.addr) || 1316 pf_rtlabel_add(&rule->dst.addr)) 1317 error = EBUSY; 1318 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1319 error = EINVAL; 1320 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1321 error = EINVAL; 1322 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1323 error = EINVAL; 1324 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1325 error = EINVAL; 1326 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1327 error = EINVAL; 1328 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1329 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1330 error = EINVAL; 1331 1332 if (rule->overload_tblname[0]) { 1333 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1334 rule->overload_tblname)) == NULL) 1335 error = EINVAL; 1336 else 1337 rule->overload_tbl->pfrkt_flags |= 1338 PFR_TFLAG_ACTIVE; 1339 } 1340 1341 
pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1342 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1343 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1344 (rule->rt > PF_FASTROUTE)) && 1345 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1346 error = EINVAL; 1347 1348 if (error) { 1349 pf_rm_rule(NULL, rule); 1350 break; 1351 } 1352 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1353 rule->evaluations = rule->packets[0] = rule->packets[1] = 1354 rule->bytes[0] = rule->bytes[1] = 0; 1355 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1356 rule, entries); 1357 ruleset->rules[rs_num].inactive.rcount++; 1358 break; 1359 } 1360 1361 case DIOCGETRULES: { 1362 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1363 struct pf_ruleset *ruleset; 1364 struct pf_rule *tail; 1365 int rs_num; 1366 1367 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1368 ruleset = pf_find_ruleset(pr->anchor); 1369 if (ruleset == NULL) { 1370 error = EINVAL; 1371 break; 1372 } 1373 rs_num = pf_get_ruleset_number(pr->rule.action); 1374 if (rs_num >= PF_RULESET_MAX) { 1375 error = EINVAL; 1376 break; 1377 } 1378 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1379 pf_rulequeue); 1380 if (tail) 1381 pr->nr = tail->nr + 1; 1382 else 1383 pr->nr = 0; 1384 pr->ticket = ruleset->rules[rs_num].active.ticket; 1385 break; 1386 } 1387 1388 case DIOCGETRULE: { 1389 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1390 struct pf_ruleset *ruleset; 1391 struct pf_rule *rule; 1392 int rs_num, i; 1393 1394 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1395 ruleset = pf_find_ruleset(pr->anchor); 1396 if (ruleset == NULL) { 1397 error = EINVAL; 1398 break; 1399 } 1400 rs_num = pf_get_ruleset_number(pr->rule.action); 1401 if (rs_num >= PF_RULESET_MAX) { 1402 error = EINVAL; 1403 break; 1404 } 1405 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1406 error = EBUSY; 1407 break; 1408 } 1409 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1410 while ((rule != NULL) && (rule->nr != 
pr->nr)) 1411 rule = TAILQ_NEXT(rule, entries); 1412 if (rule == NULL) { 1413 error = EBUSY; 1414 break; 1415 } 1416 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1417 if (pf_anchor_copyout(ruleset, rule, pr)) { 1418 error = EBUSY; 1419 break; 1420 } 1421 pfi_dynaddr_copyout(&pr->rule.src.addr); 1422 pfi_dynaddr_copyout(&pr->rule.dst.addr); 1423 pf_tbladdr_copyout(&pr->rule.src.addr); 1424 pf_tbladdr_copyout(&pr->rule.dst.addr); 1425 pf_rtlabel_copyout(&pr->rule.src.addr); 1426 pf_rtlabel_copyout(&pr->rule.dst.addr); 1427 for (i = 0; i < PF_SKIP_COUNT; ++i) 1428 if (rule->skip[i].ptr == NULL) 1429 pr->rule.skip[i].nr = (uint32_t)(-1); 1430 else 1431 pr->rule.skip[i].nr = 1432 rule->skip[i].ptr->nr; 1433 1434 if (pr->action == PF_GET_CLR_CNTR) { 1435 rule->evaluations = 0; 1436 rule->packets[0] = rule->packets[1] = 0; 1437 rule->bytes[0] = rule->bytes[1] = 0; 1438 } 1439 break; 1440 } 1441 1442 case DIOCCHANGERULE: { 1443 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1444 struct pf_ruleset *ruleset; 1445 struct pf_rule *oldrule = NULL, *newrule = NULL; 1446 u_int32_t nr = 0; 1447 int rs_num; 1448 1449 if (!(pcr->action == PF_CHANGE_REMOVE || 1450 pcr->action == PF_CHANGE_GET_TICKET) && 1451 pcr->pool_ticket != ticket_pabuf) { 1452 error = EBUSY; 1453 break; 1454 } 1455 1456 if (pcr->action < PF_CHANGE_ADD_HEAD || 1457 pcr->action > PF_CHANGE_GET_TICKET) { 1458 error = EINVAL; 1459 break; 1460 } 1461 ruleset = pf_find_ruleset(pcr->anchor); 1462 if (ruleset == NULL) { 1463 error = EINVAL; 1464 break; 1465 } 1466 rs_num = pf_get_ruleset_number(pcr->rule.action); 1467 if (rs_num >= PF_RULESET_MAX) { 1468 error = EINVAL; 1469 break; 1470 } 1471 1472 if (pcr->action == PF_CHANGE_GET_TICKET) { 1473 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1474 break; 1475 } else { 1476 if (pcr->ticket != 1477 ruleset->rules[rs_num].active.ticket) { 1478 error = EINVAL; 1479 break; 1480 } 1481 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1482 error = EINVAL; 1483 
break; 1484 } 1485 } 1486 1487 if (pcr->action != PF_CHANGE_REMOVE) { 1488 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1489 if (newrule == NULL) { 1490 error = ENOMEM; 1491 break; 1492 } 1493 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1494 newrule->cuid = ap->a_cred->cr_ruid; 1495 newrule->cpid = (int)NULL; 1496 TAILQ_INIT(&newrule->rpool.list); 1497 /* initialize refcounting */ 1498 newrule->states = 0; 1499 newrule->entries.tqe_prev = NULL; 1500 #ifndef INET 1501 if (newrule->af == AF_INET) { 1502 pool_put(&pf_rule_pl, newrule); 1503 error = EAFNOSUPPORT; 1504 break; 1505 } 1506 #endif /* INET */ 1507 #ifndef INET6 1508 if (newrule->af == AF_INET6) { 1509 pool_put(&pf_rule_pl, newrule); 1510 error = EAFNOSUPPORT; 1511 break; 1512 } 1513 #endif /* INET6 */ 1514 if (newrule->ifname[0]) { 1515 newrule->kif = pfi_kif_get(newrule->ifname); 1516 if (newrule->kif == NULL) { 1517 pool_put(&pf_rule_pl, newrule); 1518 error = EINVAL; 1519 break; 1520 } 1521 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); 1522 } else 1523 newrule->kif = NULL; 1524 1525 if (newrule->rtableid > 0 && 1526 newrule->rtableid > rt_numfibs) 1527 error = EBUSY; 1528 1529 #ifdef ALTQ 1530 /* set queue IDs */ 1531 if (newrule->qname[0] != 0) { 1532 if ((newrule->qid = 1533 pf_qname2qid(newrule->qname)) == 0) 1534 error = EBUSY; 1535 else if (newrule->pqname[0] != 0) { 1536 if ((newrule->pqid = 1537 pf_qname2qid(newrule->pqname)) == 0) 1538 error = EBUSY; 1539 } else 1540 newrule->pqid = newrule->qid; 1541 } 1542 #endif /* ALTQ */ 1543 if (newrule->tagname[0]) 1544 if ((newrule->tag = 1545 pf_tagname2tag(newrule->tagname)) == 0) 1546 error = EBUSY; 1547 if (newrule->match_tagname[0]) 1548 if ((newrule->match_tag = pf_tagname2tag( 1549 newrule->match_tagname)) == 0) 1550 error = EBUSY; 1551 if (newrule->rt && !newrule->direction) 1552 error = EINVAL; 1553 #if NPFLOG > 0 1554 if (!newrule->log) 1555 newrule->logif = 0; 1556 if (newrule->logif >= PFLOGIFS_MAX) 1557 error = EINVAL; 1558 #endif 1559 
if (pf_rtlabel_add(&newrule->src.addr) || 1560 pf_rtlabel_add(&newrule->dst.addr)) 1561 error = EBUSY; 1562 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) 1563 error = EINVAL; 1564 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1565 error = EINVAL; 1566 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1567 error = EINVAL; 1568 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1569 error = EINVAL; 1570 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1571 error = EINVAL; 1572 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1573 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1574 error = EINVAL; 1575 1576 if (newrule->overload_tblname[0]) { 1577 if ((newrule->overload_tbl = pfr_attach_table( 1578 ruleset, newrule->overload_tblname)) == 1579 NULL) 1580 error = EINVAL; 1581 else 1582 newrule->overload_tbl->pfrkt_flags |= 1583 PFR_TFLAG_ACTIVE; 1584 } 1585 1586 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1587 if (((((newrule->action == PF_NAT) || 1588 (newrule->action == PF_RDR) || 1589 (newrule->action == PF_BINAT) || 1590 (newrule->rt > PF_FASTROUTE)) && 1591 !newrule->anchor)) && 1592 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1593 error = EINVAL; 1594 1595 if (error) { 1596 pf_rm_rule(NULL, newrule); 1597 break; 1598 } 1599 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1600 newrule->evaluations = 0; 1601 newrule->packets[0] = newrule->packets[1] = 0; 1602 newrule->bytes[0] = newrule->bytes[1] = 0; 1603 } 1604 pf_empty_pool(&pf_pabuf); 1605 1606 if (pcr->action == PF_CHANGE_ADD_HEAD) 1607 oldrule = TAILQ_FIRST( 1608 ruleset->rules[rs_num].active.ptr); 1609 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1610 oldrule = TAILQ_LAST( 1611 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1612 else { 1613 oldrule = TAILQ_FIRST( 1614 ruleset->rules[rs_num].active.ptr); 1615 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1616 oldrule = TAILQ_NEXT(oldrule, entries); 1617 if (oldrule == NULL) { 1618 if (newrule != NULL) 1619 pf_rm_rule(NULL, 
newrule); 1620 error = EINVAL; 1621 break; 1622 } 1623 } 1624 1625 if (pcr->action == PF_CHANGE_REMOVE) { 1626 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1627 ruleset->rules[rs_num].active.rcount--; 1628 } else { 1629 if (oldrule == NULL) 1630 TAILQ_INSERT_TAIL( 1631 ruleset->rules[rs_num].active.ptr, 1632 newrule, entries); 1633 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1634 pcr->action == PF_CHANGE_ADD_BEFORE) 1635 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1636 else 1637 TAILQ_INSERT_AFTER( 1638 ruleset->rules[rs_num].active.ptr, 1639 oldrule, newrule, entries); 1640 ruleset->rules[rs_num].active.rcount++; 1641 } 1642 1643 nr = 0; 1644 TAILQ_FOREACH(oldrule, 1645 ruleset->rules[rs_num].active.ptr, entries) 1646 oldrule->nr = nr++; 1647 1648 ruleset->rules[rs_num].active.ticket++; 1649 1650 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1651 pf_remove_if_empty_ruleset(ruleset); 1652 1653 break; 1654 } 1655 1656 case DIOCCLRSTATES: { 1657 struct pf_state *s, *nexts; 1658 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1659 int killed = 0; 1660 1661 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { 1662 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1663 1664 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1665 s->kif->pfik_name)) { 1666 #if NPFSYNC 1667 /* don't send out individual delete messages */ 1668 s->sync_flags = PFSTATE_NOSYNC; 1669 #endif 1670 pf_unlink_state(s); 1671 killed++; 1672 } 1673 } 1674 psk->psk_af = killed; 1675 #if NPFSYNC 1676 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1677 #endif 1678 break; 1679 } 1680 1681 case DIOCKILLSTATES: { 1682 struct pf_state *s, *nexts; 1683 struct pf_state_key *sk; 1684 struct pf_state_host *src, *dst; 1685 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1686 int killed = 0; 1687 1688 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; 1689 s = nexts) { 1690 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1691 sk = 
s->state_key; 1692 1693 if (sk->direction == PF_OUT) { 1694 src = &sk->lan; 1695 dst = &sk->ext; 1696 } else { 1697 src = &sk->ext; 1698 dst = &sk->lan; 1699 } 1700 if ((!psk->psk_af || sk->af == psk->psk_af) 1701 && (!psk->psk_proto || psk->psk_proto == 1702 sk->proto) && 1703 PF_MATCHA(psk->psk_src.neg, 1704 &psk->psk_src.addr.v.a.addr, 1705 &psk->psk_src.addr.v.a.mask, 1706 &src->addr, sk->af) && 1707 PF_MATCHA(psk->psk_dst.neg, 1708 &psk->psk_dst.addr.v.a.addr, 1709 &psk->psk_dst.addr.v.a.mask, 1710 &dst->addr, sk->af) && 1711 (psk->psk_src.port_op == 0 || 1712 pf_match_port(psk->psk_src.port_op, 1713 psk->psk_src.port[0], psk->psk_src.port[1], 1714 src->port)) && 1715 (psk->psk_dst.port_op == 0 || 1716 pf_match_port(psk->psk_dst.port_op, 1717 psk->psk_dst.port[0], psk->psk_dst.port[1], 1718 dst->port)) && 1719 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1720 s->kif->pfik_name))) { 1721 #if NPFSYNC > 0 1722 /* send immediate delete of state */ 1723 pfsync_delete_state(s); 1724 s->sync_flags |= PFSTATE_NOSYNC; 1725 #endif 1726 pf_unlink_state(s); 1727 killed++; 1728 } 1729 } 1730 psk->psk_af = killed; 1731 break; 1732 } 1733 1734 case DIOCADDSTATE: { 1735 struct pfioc_state *ps = (struct pfioc_state *)addr; 1736 struct pfsync_state *sp = (struct pfsync_state *)ps->state; 1737 struct pf_state *s; 1738 struct pf_state_key *sk; 1739 struct pfi_kif *kif; 1740 1741 if (sp->timeout >= PFTM_MAX && 1742 sp->timeout != PFTM_UNTIL_PACKET) { 1743 error = EINVAL; 1744 break; 1745 } 1746 s = pool_get(&pf_state_pl, PR_NOWAIT); 1747 if (s == NULL) { 1748 error = ENOMEM; 1749 break; 1750 } 1751 bzero(s, sizeof(struct pf_state)); 1752 if ((sk = pf_alloc_state_key(s)) == NULL) { 1753 error = ENOMEM; 1754 break; 1755 } 1756 pf_state_import(sp, sk, s); 1757 kif = pfi_kif_get(sp->ifname); 1758 if (kif == NULL) { 1759 pool_put(&pf_state_pl, s); 1760 pool_put(&pf_state_key_pl, sk); 1761 error = ENOENT; 1762 break; 1763 } 1764 if (pf_insert_state(kif, s)) { 1765 pfi_kif_unref(kif, 
PFI_KIF_REF_NONE); 1766 pool_put(&pf_state_pl, s); 1767 pool_put(&pf_state_key_pl, sk); 1768 error = ENOMEM; 1769 } 1770 break; 1771 } 1772 1773 case DIOCGETSTATE: { 1774 struct pfioc_state *ps = (struct pfioc_state *)addr; 1775 struct pf_state *s; 1776 u_int32_t nr; 1777 1778 nr = 0; 1779 RB_FOREACH(s, pf_state_tree_id, &tree_id) { 1780 if (nr >= ps->nr) 1781 break; 1782 nr++; 1783 } 1784 if (s == NULL) { 1785 error = EBUSY; 1786 break; 1787 } 1788 1789 pf_state_export((struct pfsync_state *)&ps->state, 1790 s->state_key, s); 1791 break; 1792 } 1793 1794 case DIOCGETSTATES: { 1795 struct pfioc_states *ps = (struct pfioc_states *)addr; 1796 struct pf_state *state; 1797 struct pfsync_state *p, *pstore; 1798 u_int32_t nr = 0; 1799 1800 if (ps->ps_len == 0) { 1801 nr = pf_status.states; 1802 ps->ps_len = sizeof(struct pfsync_state) * nr; 1803 break; 1804 } 1805 1806 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK); 1807 1808 p = ps->ps_states; 1809 1810 state = TAILQ_FIRST(&state_list); 1811 while (state) { 1812 if (state->timeout != PFTM_UNLINKED) { 1813 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 1814 break; 1815 1816 pf_state_export(pstore, 1817 state->state_key, state); 1818 error = copyout(pstore, p, sizeof(*p)); 1819 if (error) { 1820 kfree(pstore, M_TEMP); 1821 goto fail; 1822 } 1823 p++; 1824 nr++; 1825 } 1826 state = TAILQ_NEXT(state, entry_list); 1827 } 1828 1829 ps->ps_len = sizeof(struct pfsync_state) * nr; 1830 1831 kfree(pstore, M_TEMP); 1832 break; 1833 } 1834 1835 case DIOCGETSTATUS: { 1836 struct pf_status *s = (struct pf_status *)addr; 1837 bcopy(&pf_status, s, sizeof(struct pf_status)); 1838 pfi_fill_oldstatus(s); 1839 break; 1840 } 1841 1842 case DIOCSETSTATUSIF: { 1843 struct pfioc_if *pi = (struct pfioc_if *)addr; 1844 1845 if (pi->ifname[0] == 0) { 1846 bzero(pf_status.ifname, IFNAMSIZ); 1847 break; 1848 } 1849 if (ifunit(pi->ifname) == NULL) { 1850 error = EINVAL; 1851 break; 1852 } 1853 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 
1854 break; 1855 } 1856 1857 case DIOCCLRSTATUS: { 1858 bzero(pf_status.counters, sizeof(pf_status.counters)); 1859 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 1860 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 1861 pf_status.since = time_second; 1862 if (*pf_status.ifname) 1863 pfi_clr_istats(pf_status.ifname); 1864 break; 1865 } 1866 1867 case DIOCNATLOOK: { 1868 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1869 struct pf_state_key *sk; 1870 struct pf_state *state; 1871 struct pf_state_key_cmp key; 1872 int m = 0, direction = pnl->direction; 1873 1874 key.af = pnl->af; 1875 key.proto = pnl->proto; 1876 1877 if (!pnl->proto || 1878 PF_AZERO(&pnl->saddr, pnl->af) || 1879 PF_AZERO(&pnl->daddr, pnl->af) || 1880 ((pnl->proto == IPPROTO_TCP || 1881 pnl->proto == IPPROTO_UDP) && 1882 (!pnl->dport || !pnl->sport))) 1883 error = EINVAL; 1884 else { 1885 /* 1886 * userland gives us source and dest of connection, 1887 * reverse the lookup so we ask for what happens with 1888 * the return traffic, enabling us to find it in the 1889 * state tree. 
1890 */ 1891 if (direction == PF_IN) { 1892 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 1893 key.ext.port = pnl->dport; 1894 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 1895 key.gwy.port = pnl->sport; 1896 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 1897 } else { 1898 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 1899 key.lan.port = pnl->dport; 1900 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 1901 key.ext.port = pnl->sport; 1902 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 1903 } 1904 if (m > 1) 1905 error = E2BIG; /* more than one state */ 1906 else if (state != NULL) { 1907 sk = state->state_key; 1908 if (direction == PF_IN) { 1909 PF_ACPY(&pnl->rsaddr, &sk->lan.addr, 1910 sk->af); 1911 pnl->rsport = sk->lan.port; 1912 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 1913 pnl->af); 1914 pnl->rdport = pnl->dport; 1915 } else { 1916 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr, 1917 sk->af); 1918 pnl->rdport = sk->gwy.port; 1919 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 1920 pnl->af); 1921 pnl->rsport = pnl->sport; 1922 } 1923 } else 1924 error = ENOENT; 1925 } 1926 break; 1927 } 1928 1929 case DIOCSETTIMEOUT: { 1930 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1931 int old; 1932 1933 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1934 pt->seconds < 0) { 1935 error = EINVAL; 1936 goto fail; 1937 } 1938 old = pf_default_rule.timeout[pt->timeout]; 1939 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 1940 pt->seconds = 1; 1941 pf_default_rule.timeout[pt->timeout] = pt->seconds; 1942 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 1943 wakeup(pf_purge_thread); 1944 pt->seconds = old; 1945 break; 1946 } 1947 1948 case DIOCGETTIMEOUT: { 1949 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1950 1951 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1952 error = EINVAL; 1953 goto fail; 1954 } 1955 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1956 break; 1957 } 1958 1959 case DIOCGETLIMIT: { 1960 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1961 1962 if 
(pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1963 error = EINVAL; 1964 goto fail; 1965 } 1966 pl->limit = pf_pool_limits[pl->index].limit; 1967 break; 1968 } 1969 1970 case DIOCSETLIMIT: { 1971 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1972 int old_limit; 1973 1974 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1975 pf_pool_limits[pl->index].pp == NULL) { 1976 error = EINVAL; 1977 goto fail; 1978 } 1979 1980 /* XXX Get an API to set limits on the zone/pool */ 1981 old_limit = pf_pool_limits[pl->index].limit; 1982 pf_pool_limits[pl->index].limit = pl->limit; 1983 pl->limit = old_limit; 1984 break; 1985 } 1986 1987 case DIOCSETDEBUG: { 1988 u_int32_t *level = (u_int32_t *)addr; 1989 1990 pf_status.debug = *level; 1991 break; 1992 } 1993 1994 case DIOCCLRRULECTRS: { 1995 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 1996 struct pf_ruleset *ruleset = &pf_main_ruleset; 1997 struct pf_rule *rule; 1998 1999 TAILQ_FOREACH(rule, 2000 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 2001 rule->evaluations = 0; 2002 rule->packets[0] = rule->packets[1] = 0; 2003 rule->bytes[0] = rule->bytes[1] = 0; 2004 } 2005 break; 2006 } 2007 2008 case DIOCGIFSPEED: { 2009 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2010 struct pf_ifspeed ps; 2011 struct ifnet *ifp; 2012 2013 if (psp->ifname[0] != 0) { 2014 /* Can we completely trust user-land? 
*/ 2015 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2016 ifp = ifunit(ps.ifname); 2017 if (ifp ) 2018 psp->baudrate = ifp->if_baudrate; 2019 else 2020 error = EINVAL; 2021 } else 2022 error = EINVAL; 2023 break; 2024 } 2025 #ifdef ALTQ 2026 case DIOCSTARTALTQ: { 2027 struct pf_altq *altq; 2028 2029 /* enable all altq interfaces on active list */ 2030 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2031 if (altq->qname[0] == 0) { 2032 error = pf_enable_altq(altq); 2033 if (error != 0) 2034 break; 2035 } 2036 } 2037 if (error == 0) 2038 pf_altq_running = 1; 2039 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2040 break; 2041 } 2042 2043 case DIOCSTOPALTQ: { 2044 struct pf_altq *altq; 2045 2046 /* disable all altq interfaces on active list */ 2047 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2048 if (altq->qname[0] == 0) { 2049 error = pf_disable_altq(altq); 2050 if (error != 0) 2051 break; 2052 } 2053 } 2054 if (error == 0) 2055 pf_altq_running = 0; 2056 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2057 break; 2058 } 2059 2060 case DIOCADDALTQ: { 2061 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2062 struct pf_altq *altq, *a; 2063 2064 if (pa->ticket != ticket_altqs_inactive) { 2065 error = EBUSY; 2066 break; 2067 } 2068 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2069 if (altq == NULL) { 2070 error = ENOMEM; 2071 break; 2072 } 2073 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2074 2075 /* 2076 * if this is for a queue, find the discipline and 2077 * copy the necessary fields 2078 */ 2079 if (altq->qname[0] != 0) { 2080 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2081 error = EBUSY; 2082 pool_put(&pf_altq_pl, altq); 2083 break; 2084 } 2085 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2086 if (strncmp(a->ifname, altq->ifname, 2087 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2088 altq->altq_disc = a->altq_disc; 2089 break; 2090 } 2091 } 2092 } 2093 2094 error = altq_add(altq); 2095 if (error) { 2096 pool_put(&pf_altq_pl, altq); 2097 break; 2098 } 2099 
2100 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2101 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2102 break; 2103 } 2104 2105 case DIOCGETALTQS: { 2106 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2107 struct pf_altq *altq; 2108 2109 pa->nr = 0; 2110 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2111 pa->nr++; 2112 pa->ticket = ticket_altqs_active; 2113 break; 2114 } 2115 2116 case DIOCGETALTQ: { 2117 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2118 struct pf_altq *altq; 2119 u_int32_t nr; 2120 2121 if (pa->ticket != ticket_altqs_active) { 2122 error = EBUSY; 2123 break; 2124 } 2125 nr = 0; 2126 altq = TAILQ_FIRST(pf_altqs_active); 2127 while ((altq != NULL) && (nr < pa->nr)) { 2128 altq = TAILQ_NEXT(altq, entries); 2129 nr++; 2130 } 2131 if (altq == NULL) { 2132 error = EBUSY; 2133 break; 2134 } 2135 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2136 break; 2137 } 2138 2139 case DIOCCHANGEALTQ: 2140 /* CHANGEALTQ not supported yet! */ 2141 error = ENODEV; 2142 break; 2143 2144 case DIOCGETQSTATS: { 2145 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2146 struct pf_altq *altq; 2147 u_int32_t nr; 2148 int nbytes; 2149 2150 if (pq->ticket != ticket_altqs_active) { 2151 error = EBUSY; 2152 break; 2153 } 2154 nbytes = pq->nbytes; 2155 nr = 0; 2156 altq = TAILQ_FIRST(pf_altqs_active); 2157 while ((altq != NULL) && (nr < pq->nr)) { 2158 altq = TAILQ_NEXT(altq, entries); 2159 nr++; 2160 } 2161 if (altq == NULL) { 2162 error = EBUSY; 2163 break; 2164 } 2165 error = altq_getqstats(altq, pq->buf, &nbytes); 2166 if (error == 0) { 2167 pq->scheduler = altq->scheduler; 2168 pq->nbytes = nbytes; 2169 } 2170 break; 2171 } 2172 #endif /* ALTQ */ 2173 2174 case DIOCBEGINADDRS: { 2175 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2176 2177 pf_empty_pool(&pf_pabuf); 2178 pp->ticket = ++ticket_pabuf; 2179 break; 2180 } 2181 2182 case DIOCADDADDR: { 2183 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2184 2185 if (pp->ticket 
!= ticket_pabuf) { 2186 error = EBUSY; 2187 break; 2188 } 2189 #ifndef INET 2190 if (pp->af == AF_INET) { 2191 error = EAFNOSUPPORT; 2192 break; 2193 } 2194 #endif /* INET */ 2195 #ifndef INET6 2196 if (pp->af == AF_INET6) { 2197 error = EAFNOSUPPORT; 2198 break; 2199 } 2200 #endif /* INET6 */ 2201 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2202 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2203 pp->addr.addr.type != PF_ADDR_TABLE) { 2204 error = EINVAL; 2205 break; 2206 } 2207 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2208 if (pa == NULL) { 2209 error = ENOMEM; 2210 break; 2211 } 2212 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2213 if (pa->ifname[0]) { 2214 pa->kif = pfi_kif_get(pa->ifname); 2215 if (pa->kif == NULL) { 2216 pool_put(&pf_pooladdr_pl, pa); 2217 error = EINVAL; 2218 break; 2219 } 2220 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); 2221 } 2222 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2223 pfi_dynaddr_remove(&pa->addr); 2224 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); 2225 pool_put(&pf_pooladdr_pl, pa); 2226 error = EINVAL; 2227 break; 2228 } 2229 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2230 break; 2231 } 2232 2233 case DIOCGETADDRS: { 2234 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2235 2236 pp->nr = 0; 2237 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2238 pp->r_num, 0, 1, 0); 2239 if (pool == NULL) { 2240 error = EBUSY; 2241 break; 2242 } 2243 TAILQ_FOREACH(pa, &pool->list, entries) 2244 pp->nr++; 2245 break; 2246 } 2247 2248 case DIOCGETADDR: { 2249 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2250 u_int32_t nr = 0; 2251 2252 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2253 pp->r_num, 0, 1, 1); 2254 if (pool == NULL) { 2255 error = EBUSY; 2256 break; 2257 } 2258 pa = TAILQ_FIRST(&pool->list); 2259 while ((pa != NULL) && (nr < pp->nr)) { 2260 pa = TAILQ_NEXT(pa, entries); 2261 nr++; 2262 } 2263 if (pa == NULL) { 2264 error = EBUSY; 2265 break; 2266 } 2267 bcopy(pa, &pp->addr, sizeof(struct 
pf_pooladdr)); 2268 pfi_dynaddr_copyout(&pp->addr.addr); 2269 pf_tbladdr_copyout(&pp->addr.addr); 2270 pf_rtlabel_copyout(&pp->addr.addr); 2271 break; 2272 } 2273 2274 case DIOCCHANGEADDR: { 2275 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2276 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2277 struct pf_ruleset *ruleset; 2278 2279 if (pca->action < PF_CHANGE_ADD_HEAD || 2280 pca->action > PF_CHANGE_REMOVE) { 2281 error = EINVAL; 2282 break; 2283 } 2284 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2285 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2286 pca->addr.addr.type != PF_ADDR_TABLE) { 2287 error = EINVAL; 2288 break; 2289 } 2290 2291 ruleset = pf_find_ruleset(pca->anchor); 2292 if (ruleset == NULL) { 2293 error = EBUSY; 2294 break; 2295 } 2296 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2297 pca->r_num, pca->r_last, 1, 1); 2298 if (pool == NULL) { 2299 error = EBUSY; 2300 break; 2301 } 2302 if (pca->action != PF_CHANGE_REMOVE) { 2303 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2304 if (newpa == NULL) { 2305 error = ENOMEM; 2306 break; 2307 } 2308 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2309 #ifndef INET 2310 if (pca->af == AF_INET) { 2311 pool_put(&pf_pooladdr_pl, newpa); 2312 error = EAFNOSUPPORT; 2313 break; 2314 } 2315 #endif /* INET */ 2316 #ifndef INET6 2317 if (pca->af == AF_INET6) { 2318 pool_put(&pf_pooladdr_pl, newpa); 2319 error = EAFNOSUPPORT; 2320 break; 2321 } 2322 #endif /* INET6 */ 2323 if (newpa->ifname[0]) { 2324 newpa->kif = pfi_kif_get(newpa->ifname); 2325 if (newpa->kif == NULL) { 2326 pool_put(&pf_pooladdr_pl, newpa); 2327 error = EINVAL; 2328 break; 2329 } 2330 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); 2331 } else 2332 newpa->kif = NULL; 2333 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2334 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2335 pfi_dynaddr_remove(&newpa->addr); 2336 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); 2337 pool_put(&pf_pooladdr_pl, newpa); 2338 error = EINVAL; 2339 
break; 2340 } 2341 } 2342 2343 if (pca->action == PF_CHANGE_ADD_HEAD) 2344 oldpa = TAILQ_FIRST(&pool->list); 2345 else if (pca->action == PF_CHANGE_ADD_TAIL) 2346 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2347 else { 2348 int i = 0; 2349 2350 oldpa = TAILQ_FIRST(&pool->list); 2351 while ((oldpa != NULL) && (i < pca->nr)) { 2352 oldpa = TAILQ_NEXT(oldpa, entries); 2353 i++; 2354 } 2355 if (oldpa == NULL) { 2356 error = EINVAL; 2357 break; 2358 } 2359 } 2360 2361 if (pca->action == PF_CHANGE_REMOVE) { 2362 TAILQ_REMOVE(&pool->list, oldpa, entries); 2363 pfi_dynaddr_remove(&oldpa->addr); 2364 pf_tbladdr_remove(&oldpa->addr); 2365 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); 2366 pool_put(&pf_pooladdr_pl, oldpa); 2367 } else { 2368 if (oldpa == NULL) 2369 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2370 else if (pca->action == PF_CHANGE_ADD_HEAD || 2371 pca->action == PF_CHANGE_ADD_BEFORE) 2372 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2373 else 2374 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2375 newpa, entries); 2376 } 2377 2378 pool->cur = TAILQ_FIRST(&pool->list); 2379 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2380 pca->af); 2381 break; 2382 } 2383 2384 case DIOCGETRULESETS: { 2385 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2386 struct pf_ruleset *ruleset; 2387 struct pf_anchor *anchor; 2388 2389 pr->path[sizeof(pr->path) - 1] = 0; 2390 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2391 error = EINVAL; 2392 break; 2393 } 2394 pr->nr = 0; 2395 if (ruleset->anchor == NULL) { 2396 /* XXX kludge for pf_main_ruleset */ 2397 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2398 if (anchor->parent == NULL) 2399 pr->nr++; 2400 } else { 2401 RB_FOREACH(anchor, pf_anchor_node, 2402 &ruleset->anchor->children) 2403 pr->nr++; 2404 } 2405 break; 2406 } 2407 2408 case DIOCGETRULESET: { 2409 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2410 struct pf_ruleset *ruleset; 2411 struct pf_anchor *anchor; 2412 u_int32_t nr = 0; 2413 2414 
pr->path[sizeof(pr->path) - 1] = 0; 2415 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2416 error = EINVAL; 2417 break; 2418 } 2419 pr->name[0] = 0; 2420 if (ruleset->anchor == NULL) { 2421 /* XXX kludge for pf_main_ruleset */ 2422 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2423 if (anchor->parent == NULL && nr++ == pr->nr) { 2424 strlcpy(pr->name, anchor->name, 2425 sizeof(pr->name)); 2426 break; 2427 } 2428 } else { 2429 RB_FOREACH(anchor, pf_anchor_node, 2430 &ruleset->anchor->children) 2431 if (nr++ == pr->nr) { 2432 strlcpy(pr->name, anchor->name, 2433 sizeof(pr->name)); 2434 break; 2435 } 2436 } 2437 if (!pr->name[0]) 2438 error = EBUSY; 2439 break; 2440 } 2441 2442 case DIOCRCLRTABLES: { 2443 struct pfioc_table *io = (struct pfioc_table *)addr; 2444 2445 if (io->pfrio_esize != 0) { 2446 error = ENODEV; 2447 break; 2448 } 2449 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2450 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2451 break; 2452 } 2453 2454 case DIOCRADDTABLES: { 2455 struct pfioc_table *io = (struct pfioc_table *)addr; 2456 2457 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2458 error = ENODEV; 2459 break; 2460 } 2461 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2462 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2463 break; 2464 } 2465 2466 case DIOCRDELTABLES: { 2467 struct pfioc_table *io = (struct pfioc_table *)addr; 2468 2469 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2470 error = ENODEV; 2471 break; 2472 } 2473 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2474 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2475 break; 2476 } 2477 2478 case DIOCRGETTABLES: { 2479 struct pfioc_table *io = (struct pfioc_table *)addr; 2480 2481 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2482 error = ENODEV; 2483 break; 2484 } 2485 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2486 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2487 break; 2488 } 2489 2490 case 
DIOCRGETTSTATS: { 2491 struct pfioc_table *io = (struct pfioc_table *)addr; 2492 2493 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2494 error = ENODEV; 2495 break; 2496 } 2497 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2498 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2499 break; 2500 } 2501 2502 case DIOCRCLRTSTATS: { 2503 struct pfioc_table *io = (struct pfioc_table *)addr; 2504 2505 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2506 error = ENODEV; 2507 break; 2508 } 2509 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2510 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2511 break; 2512 } 2513 2514 case DIOCRSETTFLAGS: { 2515 struct pfioc_table *io = (struct pfioc_table *)addr; 2516 2517 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2518 error = ENODEV; 2519 break; 2520 } 2521 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2522 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2523 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2524 break; 2525 } 2526 2527 case DIOCRCLRADDRS: { 2528 struct pfioc_table *io = (struct pfioc_table *)addr; 2529 2530 if (io->pfrio_esize != 0) { 2531 error = ENODEV; 2532 break; 2533 } 2534 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2535 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2536 break; 2537 } 2538 2539 case DIOCRADDADDRS: { 2540 struct pfioc_table *io = (struct pfioc_table *)addr; 2541 2542 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2543 error = ENODEV; 2544 break; 2545 } 2546 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2547 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2548 PFR_FLAG_USERIOCTL); 2549 break; 2550 } 2551 2552 case DIOCRDELADDRS: { 2553 struct pfioc_table *io = (struct pfioc_table *)addr; 2554 2555 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2556 error = ENODEV; 2557 break; 2558 } 2559 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2560 io->pfrio_size, &io->pfrio_ndel, 
io->pfrio_flags | 2561 PFR_FLAG_USERIOCTL); 2562 break; 2563 } 2564 2565 case DIOCRSETADDRS: { 2566 struct pfioc_table *io = (struct pfioc_table *)addr; 2567 2568 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2569 error = ENODEV; 2570 break; 2571 } 2572 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2573 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2574 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2575 PFR_FLAG_USERIOCTL, 0); 2576 break; 2577 } 2578 2579 case DIOCRGETADDRS: { 2580 struct pfioc_table *io = (struct pfioc_table *)addr; 2581 2582 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2583 error = ENODEV; 2584 break; 2585 } 2586 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2587 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2588 break; 2589 } 2590 2591 case DIOCRGETASTATS: { 2592 struct pfioc_table *io = (struct pfioc_table *)addr; 2593 2594 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2595 error = ENODEV; 2596 break; 2597 } 2598 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2599 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2600 break; 2601 } 2602 2603 case DIOCRCLRASTATS: { 2604 struct pfioc_table *io = (struct pfioc_table *)addr; 2605 2606 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2607 error = ENODEV; 2608 break; 2609 } 2610 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2611 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2612 PFR_FLAG_USERIOCTL); 2613 break; 2614 } 2615 2616 case DIOCRTSTADDRS: { 2617 struct pfioc_table *io = (struct pfioc_table *)addr; 2618 2619 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2620 error = ENODEV; 2621 break; 2622 } 2623 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2624 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2625 PFR_FLAG_USERIOCTL); 2626 break; 2627 } 2628 2629 case DIOCRINADEFINE: { 2630 struct pfioc_table *io = (struct pfioc_table *)addr; 2631 2632 if (io->pfrio_esize != 
sizeof(struct pfr_addr)) { 2633 error = ENODEV; 2634 break; 2635 } 2636 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2637 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2638 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2639 break; 2640 } 2641 2642 case DIOCOSFPADD: { 2643 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2644 error = pf_osfp_add(io); 2645 break; 2646 } 2647 2648 case DIOCOSFPGET: { 2649 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2650 error = pf_osfp_get(io); 2651 break; 2652 } 2653 2654 case DIOCXBEGIN: { 2655 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2656 struct pfioc_trans_e *ioe; 2657 struct pfr_table *table; 2658 int i; 2659 2660 if (io->esize != sizeof(*ioe)) { 2661 error = ENODEV; 2662 goto fail; 2663 } 2664 ioe = (struct pfioc_trans_e *)kmalloc(sizeof(*ioe), 2665 M_TEMP, M_WAITOK); 2666 table = (struct pfr_table *)kmalloc(sizeof(*table), 2667 M_TEMP, M_WAITOK); 2668 for (i = 0; i < io->size; i++) { 2669 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2670 kfree(table, M_TEMP); 2671 kfree(ioe, M_TEMP); 2672 error = EFAULT; 2673 goto fail; 2674 } 2675 switch (ioe->rs_num) { 2676 #ifdef ALTQ 2677 case PF_RULESET_ALTQ: 2678 if (ioe->anchor[0]) { 2679 kfree(table, M_TEMP); 2680 kfree(ioe, M_TEMP); 2681 error = EINVAL; 2682 goto fail; 2683 } 2684 if ((error = pf_begin_altq(&ioe->ticket))) { 2685 kfree(table, M_TEMP); 2686 kfree(ioe, M_TEMP); 2687 goto fail; 2688 } 2689 break; 2690 #endif /* ALTQ */ 2691 case PF_RULESET_TABLE: 2692 bzero(table, sizeof(*table)); 2693 strlcpy(table->pfrt_anchor, ioe->anchor, 2694 sizeof(table->pfrt_anchor)); 2695 if ((error = pfr_ina_begin(table, 2696 &ioe->ticket, NULL, 0))) { 2697 kfree(table, M_TEMP); 2698 kfree(ioe, M_TEMP); 2699 goto fail; 2700 } 2701 break; 2702 default: 2703 if ((error = pf_begin_rules(&ioe->ticket, 2704 ioe->rs_num, ioe->anchor))) { 2705 kfree(table, M_TEMP); 2706 kfree(ioe, M_TEMP); 2707 goto fail; 2708 } 2709 break; 2710 } 2711 if 
(copyout(ioe, io->array+i, sizeof(io->array[i]))) { 2712 kfree(table, M_TEMP); 2713 kfree(ioe, M_TEMP); 2714 error = EFAULT; 2715 goto fail; 2716 } 2717 } 2718 kfree(table, M_TEMP); 2719 kfree(ioe, M_TEMP); 2720 break; 2721 } 2722 2723 case DIOCXROLLBACK: { 2724 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2725 struct pfioc_trans_e *ioe; 2726 struct pfr_table *table; 2727 int i; 2728 2729 if (io->esize != sizeof(*ioe)) { 2730 error = ENODEV; 2731 goto fail; 2732 } 2733 ioe = (struct pfioc_trans_e *)kmalloc(sizeof(*ioe), 2734 M_TEMP, M_WAITOK); 2735 table = (struct pfr_table *)kmalloc(sizeof(*table), 2736 M_TEMP, M_WAITOK); 2737 for (i = 0; i < io->size; i++) { 2738 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2739 kfree(table, M_TEMP); 2740 kfree(ioe, M_TEMP); 2741 error = EFAULT; 2742 goto fail; 2743 } 2744 switch (ioe->rs_num) { 2745 #ifdef ALTQ 2746 case PF_RULESET_ALTQ: 2747 if (ioe->anchor[0]) { 2748 kfree(table, M_TEMP); 2749 kfree(ioe, M_TEMP); 2750 error = EINVAL; 2751 goto fail; 2752 } 2753 if ((error = pf_rollback_altq(ioe->ticket))) { 2754 kfree(table, M_TEMP); 2755 kfree(ioe, M_TEMP); 2756 goto fail; /* really bad */ 2757 } 2758 break; 2759 #endif /* ALTQ */ 2760 case PF_RULESET_TABLE: 2761 bzero(table, sizeof(*table)); 2762 strlcpy(table->pfrt_anchor, ioe->anchor, 2763 sizeof(table->pfrt_anchor)); 2764 if ((error = pfr_ina_rollback(table, 2765 ioe->ticket, NULL, 0))) { 2766 kfree(table, M_TEMP); 2767 kfree(ioe, M_TEMP); 2768 goto fail; /* really bad */ 2769 } 2770 break; 2771 default: 2772 if ((error = pf_rollback_rules(ioe->ticket, 2773 ioe->rs_num, ioe->anchor))) { 2774 kfree(table, M_TEMP); 2775 kfree(ioe, M_TEMP); 2776 goto fail; /* really bad */ 2777 } 2778 break; 2779 } 2780 } 2781 kfree(table, M_TEMP); 2782 kfree(ioe, M_TEMP); 2783 break; 2784 } 2785 2786 case DIOCXCOMMIT: { 2787 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2788 struct pfioc_trans_e *ioe; 2789 struct pfr_table *table; 2790 struct pf_ruleset *rs; 2791 int i; 2792 
2793 if (io->esize != sizeof(*ioe)) { 2794 error = ENODEV; 2795 goto fail; 2796 } 2797 ioe = (struct pfioc_trans_e *)kmalloc(sizeof(*ioe), 2798 M_TEMP, M_WAITOK); 2799 table = (struct pfr_table *)kmalloc(sizeof(*table), 2800 M_TEMP, M_WAITOK); 2801 /* first makes sure everything will succeed */ 2802 for (i = 0; i < io->size; i++) { 2803 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2804 kfree(table, M_TEMP); 2805 kfree(ioe, M_TEMP); 2806 error = EFAULT; 2807 goto fail; 2808 } 2809 switch (ioe->rs_num) { 2810 #ifdef ALTQ 2811 case PF_RULESET_ALTQ: 2812 if (ioe->anchor[0]) { 2813 kfree(table, M_TEMP); 2814 kfree(ioe, M_TEMP); 2815 error = EINVAL; 2816 goto fail; 2817 } 2818 if (!altqs_inactive_open || ioe->ticket != 2819 ticket_altqs_inactive) { 2820 kfree(table, M_TEMP); 2821 kfree(ioe, M_TEMP); 2822 error = EBUSY; 2823 goto fail; 2824 } 2825 break; 2826 #endif /* ALTQ */ 2827 case PF_RULESET_TABLE: 2828 rs = pf_find_ruleset(ioe->anchor); 2829 if (rs == NULL || !rs->topen || ioe->ticket != 2830 rs->tticket) { 2831 kfree(table, M_TEMP); 2832 kfree(ioe, M_TEMP); 2833 error = EBUSY; 2834 goto fail; 2835 } 2836 break; 2837 default: 2838 if (ioe->rs_num < 0 || ioe->rs_num >= 2839 PF_RULESET_MAX) { 2840 kfree(table, M_TEMP); 2841 kfree(ioe, M_TEMP); 2842 error = EINVAL; 2843 goto fail; 2844 } 2845 rs = pf_find_ruleset(ioe->anchor); 2846 if (rs == NULL || 2847 !rs->rules[ioe->rs_num].inactive.open || 2848 rs->rules[ioe->rs_num].inactive.ticket != 2849 ioe->ticket) { 2850 kfree(table, M_TEMP); 2851 kfree(ioe, M_TEMP); 2852 error = EBUSY; 2853 goto fail; 2854 } 2855 break; 2856 } 2857 } 2858 /* now do the commit - no errors should happen here */ 2859 for (i = 0; i < io->size; i++) { 2860 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2861 kfree(table, M_TEMP); 2862 kfree(ioe, M_TEMP); 2863 error = EFAULT; 2864 goto fail; 2865 } 2866 switch (ioe->rs_num) { 2867 #ifdef ALTQ 2868 case PF_RULESET_ALTQ: 2869 if ((error = pf_commit_altq(ioe->ticket))) { 2870 kfree(table, M_TEMP); 
2871 kfree(ioe, M_TEMP); 2872 goto fail; /* really bad */ 2873 } 2874 break; 2875 #endif /* ALTQ */ 2876 case PF_RULESET_TABLE: 2877 bzero(table, sizeof(*table)); 2878 strlcpy(table->pfrt_anchor, ioe->anchor, 2879 sizeof(table->pfrt_anchor)); 2880 if ((error = pfr_ina_commit(table, ioe->ticket, 2881 NULL, NULL, 0))) { 2882 kfree(table, M_TEMP); 2883 kfree(ioe, M_TEMP); 2884 goto fail; /* really bad */ 2885 } 2886 break; 2887 default: 2888 if ((error = pf_commit_rules(ioe->ticket, 2889 ioe->rs_num, ioe->anchor))) { 2890 kfree(table, M_TEMP); 2891 kfree(ioe, M_TEMP); 2892 goto fail; /* really bad */ 2893 } 2894 break; 2895 } 2896 } 2897 kfree(table, M_TEMP); 2898 kfree(ioe, M_TEMP); 2899 break; 2900 } 2901 2902 case DIOCGETSRCNODES: { 2903 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 2904 struct pf_src_node *n, *p, *pstore; 2905 u_int32_t nr = 0; 2906 int space = psn->psn_len; 2907 2908 if (space == 0) { 2909 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2910 nr++; 2911 psn->psn_len = sizeof(struct pf_src_node) * nr; 2912 break; 2913 } 2914 2915 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK); 2916 2917 p = psn->psn_src_nodes; 2918 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2919 int secs = time_second, diff; 2920 2921 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 2922 break; 2923 2924 bcopy(n, pstore, sizeof(*pstore)); 2925 if (n->rule.ptr != NULL) 2926 pstore->rule.nr = n->rule.ptr->nr; 2927 pstore->creation = secs - pstore->creation; 2928 if (pstore->expire > secs) 2929 pstore->expire -= secs; 2930 else 2931 pstore->expire = 0; 2932 2933 /* adjust the connection rate estimate */ 2934 diff = secs - n->conn_rate.last; 2935 if (diff >= n->conn_rate.seconds) 2936 pstore->conn_rate.count = 0; 2937 else 2938 pstore->conn_rate.count -= 2939 n->conn_rate.count * diff / 2940 n->conn_rate.seconds; 2941 2942 error = copyout(pstore, p, sizeof(*p)); 2943 if (error) { 2944 kfree(pstore, M_TEMP); 2945 goto fail; 2946 } 2947 p++; 2948 nr++; 2949 } 
2950 psn->psn_len = sizeof(struct pf_src_node) * nr; 2951 2952 kfree(pstore, M_TEMP); 2953 break; 2954 } 2955 2956 case DIOCCLRSRCNODES: { 2957 struct pf_src_node *n; 2958 struct pf_state *state; 2959 2960 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 2961 state->src_node = NULL; 2962 state->nat_src_node = NULL; 2963 } 2964 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2965 n->expire = 1; 2966 n->states = 0; 2967 } 2968 pf_purge_expired_src_nodes(1); 2969 pf_status.src_nodes = 0; 2970 break; 2971 } 2972 2973 case DIOCKILLSRCNODES: { 2974 struct pf_src_node *sn; 2975 struct pf_state *s; 2976 struct pfioc_src_node_kill *psnk = \ 2977 (struct pfioc_src_node_kill *) addr; 2978 int killed = 0; 2979 2980 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 2981 if (PF_MATCHA(psnk->psnk_src.neg, \ 2982 &psnk->psnk_src.addr.v.a.addr, \ 2983 &psnk->psnk_src.addr.v.a.mask, \ 2984 &sn->addr, sn->af) && 2985 PF_MATCHA(psnk->psnk_dst.neg, \ 2986 &psnk->psnk_dst.addr.v.a.addr, \ 2987 &psnk->psnk_dst.addr.v.a.mask, \ 2988 &sn->raddr, sn->af)) { 2989 /* Handle state to src_node linkage */ 2990 if (sn->states != 0) { 2991 RB_FOREACH(s, pf_state_tree_id, 2992 &tree_id) { 2993 if (s->src_node == sn) 2994 s->src_node = NULL; 2995 if (s->nat_src_node == sn) 2996 s->nat_src_node = NULL; 2997 } 2998 sn->states = 0; 2999 } 3000 sn->expire = 1; 3001 killed++; 3002 } 3003 } 3004 3005 if (killed > 0) 3006 pf_purge_expired_src_nodes(1); 3007 3008 psnk->psnk_af = killed; 3009 break; 3010 } 3011 3012 case DIOCSETHOSTID: { 3013 u_int32_t *hostid = (u_int32_t *)addr; 3014 3015 if (*hostid == 0) 3016 pf_status.hostid = karc4random(); 3017 else 3018 pf_status.hostid = *hostid; 3019 break; 3020 } 3021 3022 case DIOCOSFPFLUSH: 3023 crit_enter(); 3024 pf_osfp_flush(); 3025 crit_exit(); 3026 break; 3027 3028 case DIOCIGETIFACES: { 3029 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3030 3031 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 3032 error = ENODEV; 3033 break; 3034 } 3035 error = 
pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 3036 &io->pfiio_size); 3037 break; 3038 } 3039 3040 case DIOCSETIFFLAG: { 3041 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3042 3043 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3044 break; 3045 } 3046 3047 case DIOCCLRIFFLAG: { 3048 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3049 3050 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3051 break; 3052 } 3053 3054 default: 3055 error = ENODEV; 3056 break; 3057 } 3058 fail: 3059 return (error); 3060 } 3061 3062 /* 3063 * XXX - Check for version missmatch!!! 3064 */ 3065 static void 3066 pf_clear_states(void) 3067 { 3068 struct pf_state *state; 3069 3070 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3071 state->timeout = PFTM_PURGE; 3072 #if NPFSYNC 3073 /* don't send out individual delete messages */ 3074 state->sync_flags = PFSTATE_NOSYNC; 3075 #endif 3076 pf_unlink_state(state); 3077 } 3078 pf_status.states = 0; 3079 #if 0 /* NPFSYNC */ 3080 /* 3081 * XXX This is called on module unload, we do not want to sync that over? */ 3082 */ 3083 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 3084 #endif 3085 } 3086 3087 static int 3088 pf_clear_tables(void) 3089 { 3090 struct pfioc_table io; 3091 int error; 3092 3093 bzero(&io, sizeof(io)); 3094 3095 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 3096 io.pfrio_flags); 3097 3098 return (error); 3099 } 3100 3101 static void 3102 pf_clear_srcnodes(void) 3103 { 3104 struct pf_src_node *n; 3105 struct pf_state *state; 3106 3107 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3108 state->src_node = NULL; 3109 state->nat_src_node = NULL; 3110 } 3111 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3112 n->expire = 1; 3113 n->states = 0; 3114 } 3115 pf_purge_expired_src_nodes(0); 3116 pf_status.src_nodes = 0; 3117 } 3118 /* 3119 * XXX - Check for version missmatch!!! 3120 */ 3121 3122 /* 3123 * Duplicate pfctl -Fa operation to get rid of as much as we can. 
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';	/* empty anchor path: operate on the main ruleset */

	pf_status.running = 0;
	do {
		/*
		 * For each main ruleset, begin a fresh (empty) inactive
		 * ruleset; committing it below swaps the empty set in,
		 * flushing all rules of that type.
		 */
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		/* flush the ALTQ configuration the same begin/commit way */
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes();

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while(0);

	return (error);
}

/*
 * pfil(9) input hook for AF_INET: run the packet through pf_test() and,
 * if pf rejects it, free the mbuf and NULL the caller's pointer.
 */
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	/*
	 * DragonFly's version of pf uses FreeBSD's native host byte ordering
	 * for ip_len/ip_off.
This is why we don't have to change byte order
	 * like the FreeBSD-5 version does.
	 */
	int chk;

	chk = pf_test(PF_IN, ifp, m, NULL, NULL);
	if (chk && *m) {
		/* pf dropped the packet: consume the mbuf */
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/*
 * pfil(9) output hook for AF_INET: finalize any delayed data checksum
 * before handing the packet to pf_test(), then free the mbuf if pf
 * decides to drop it.
 */
static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	/*
	 * DragonFly's version of pf uses FreeBSD's native host byte ordering
	 * for ip_len/ip_off. This is why we don't have to change byte order
	 * like the FreeBSD-5 version does.
	 */
	int chk;

	/* We need a proper CSUM before we start (s. OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test(PF_OUT, ifp, m, NULL, NULL);
	if (chk && *m) {
		/* pf dropped the packet: consume the mbuf */
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

#ifdef INET6
/*
 * pfil(9) input hook for AF_INET6: as pf_check_in(), but via pf_test6().
 */
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	chk = pf_test6(PF_IN, ifp, m, NULL, NULL);
	if (chk && *m) {
		/* pf dropped the packet: consume the mbuf */
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}

/*
 * pfil(9) output hook for AF_INET6; see pf_check_out().
 */
static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
{
	/*
	 * IPv6 is not affected by ip_len/ip_off byte order changes.
	 */
	int chk;

	/* We need a proper CSUM before we start (s.
OpenBSD ip_output) */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	chk = pf_test6(PF_OUT, ifp, m, NULL, NULL);
	if (chk && *m) {
		/* pf dropped the packet: consume the mbuf */
		m_freem(*m);
		*m = NULL;
	}
	return chk;
}
#endif /* INET6 */

/*
 * Register the pf input/output hooks on the AF_INET (and, with INET6,
 * AF_INET6) pfil heads.  Idempotent: a no-op if already hooked.
 *
 * Returns 0 on success or ENODEV if a pfil head cannot be found; on the
 * AF_INET6 failure path the already-installed AF_INET hooks are rolled
 * back.
 */
static int
hook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	if (pf_pfil_hooked)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ENODEV);
	pfil_add_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
		/* undo the IPv4 hooks so we fail cleanly */
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
		return (ENODEV);
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6);
#endif

	pf_pfil_hooked = 1;
	return (0);
}

/*
 * Unregister the pf pfil hooks.  Idempotent: a no-op if not hooked.
 *
 * NOTE(review): if the AF_INET6 head lookup fails here, we return
 * ENODEV after having already removed the AF_INET hooks but without
 * clearing pf_pfil_hooked, leaving the hooked/unhooked state
 * inconsistent — confirm whether that path can occur in practice.
 */
static int
dehook_pf(void)
{
	struct pfil_head *pfh_inet;
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	if (pf_pfil_hooked == 0)
		return (0);

	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ENODEV);
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet);
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ENODEV);
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6);
#endif

	pf_pfil_hooked = 0;
	return (0);
}

/*
 * Module load: initialize zones and locks, create /dev/pf and attach
 * the pf subsystem.  Returns 0 or the errno from pfattach().
 */
static int
pf_load(void)
{
	int error;
	init_zone_var();
	lockinit(&pf_mod_lck, "pf task lck", 0, LK_CANRECURSE);
	pf_dev = make_dev(&pf_ops, 0, 0, 0, 0600, PF_NAME);
	error = pfattach();
	if (error) {
		/* roll back the device ops and module lock on failure */
		dev_ops_remove_all(&pf_ops);
		lockuninit(&pf_mod_lck);
		return (error);
	}
	lockinit(&pf_consistency_lock, "pfconslck", 0, LK_CANRECURSE);
	return (0);
}

/*
 * Module unload: stop pf, detach the pfil hooks, flush all rules,
 * tables, states and source nodes, wait for the purge thread to exit,
 * then tear down the remaining subsystems, device ops and locks.
 *
 * Returns 0 on success, or the dehook error (which aborts the unload).
 */
static int
pf_unload(void)
{
	int error;

	pf_status.running = 0;
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		kprintf("pfil unregistration fail\n");
		return error;
	}
	shutdown_pf();
	/*
	 * Signal the purge thread to exit and wait until it bumps
	 * pf_end_threads past 1 to acknowledge.
	 */
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		lksleep(pf_purge_thread, &pf_mod_lck, 0, "pftmo", hz);

	}
	pfi_cleanup();
	pf_osfp_flush();
	pf_osfp_cleanup();
	cleanup_pf_zone();
	dev_ops_remove_all(&pf_ops);
	lockuninit(&pf_consistency_lock);
	lockuninit(&pf_mod_lck);
	return 0;
}

/*
 * module(9) event handler: dispatch load/unload; anything else is EINVAL.
 */
static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch(type) {
	case MOD_LOAD:
		error = pf_load();
		break;

	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};
DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);