1 /* $OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */ 2 3 /* 4 * Copyright (c) 2010 The DragonFly Project. All rights reserved. 5 * 6 * Copyright (c) 2001 Daniel Hartmeier 7 * Copyright (c) 2002,2003 Henning Brauer 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * - Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * - Redistributions in binary form must reproduce the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer in the documentation and/or other materials provided 19 * with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 * 34 * Effort sponsored in part by the Defense Advanced Research Projects 35 * Agency (DARPA) and Air Force Research Laboratory, Air Force 36 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
37 * 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_inet6.h" 42 #include "use_pfsync.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/conf.h> 47 #include <sys/device.h> 48 #include <sys/mbuf.h> 49 #include <sys/filio.h> 50 #include <sys/fcntl.h> 51 #include <sys/socket.h> 52 #include <sys/socketvar.h> 53 #include <sys/kernel.h> 54 #include <sys/kthread.h> 55 #include <sys/time.h> 56 #include <sys/proc.h> 57 #include <sys/malloc.h> 58 #include <sys/module.h> 59 #include <vm/vm_zone.h> 60 #include <sys/lock.h> 61 62 #include <sys/thread2.h> 63 #include <sys/mplock2.h> 64 65 #include <net/if.h> 66 #include <net/if_types.h> 67 #include <net/route.h> 68 69 #include <netinet/in.h> 70 #include <netinet/in_var.h> 71 #include <netinet/in_systm.h> 72 #include <netinet/ip.h> 73 #include <netinet/ip_var.h> 74 #include <netinet/ip_icmp.h> 75 76 #include <net/pf/pfvar.h> 77 #include <sys/md5.h> 78 #include <net/pf/pfvar.h> 79 80 #if NPFSYNC > 0 81 #include <net/pf/if_pfsync.h> 82 #endif /* NPFSYNC > 0 */ 83 84 #if NPFLOG > 0 85 #include <net/if_pflog.h> 86 #endif /* NPFLOG > 0 */ 87 88 #ifdef INET6 89 #include <netinet/ip6.h> 90 #include <netinet/in_pcb.h> 91 #endif /* INET6 */ 92 93 #ifdef ALTQ 94 #include <net/altq/altq.h> 95 #endif 96 97 #include <machine/limits.h> 98 #include <net/pfil.h> 99 #include <sys/mutex.h> 100 101 u_int rt_numfibs = RT_NUMFIBS; 102 103 void init_zone_var(void); 104 void cleanup_pf_zone(void); 105 int pfattach(void); 106 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, 107 u_int8_t, u_int8_t, u_int8_t); 108 109 void pf_mv_pool(struct pf_palist *, struct pf_palist *); 110 void pf_empty_pool(struct pf_palist *); 111 #ifdef ALTQ 112 int pf_begin_altq(u_int32_t *); 113 int pf_rollback_altq(u_int32_t); 114 int pf_commit_altq(u_int32_t); 115 int pf_enable_altq(struct pf_altq *); 116 int pf_disable_altq(struct pf_altq *); 117 #endif /* ALTQ */ 118 int pf_begin_rules(u_int32_t *, int, const char *); 119 int 
pf_rollback_rules(u_int32_t, int, char *); 120 int pf_setup_pfsync_matching(struct pf_ruleset *); 121 void pf_hash_rule(MD5_CTX *, struct pf_rule *); 122 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 123 int pf_commit_rules(u_int32_t, int, char *); 124 void pf_state_export(struct pfsync_state *, 125 struct pf_state_key *, struct pf_state *); 126 void pf_state_import(struct pfsync_state *, 127 struct pf_state_key *, struct pf_state *); 128 129 struct pf_rule pf_default_rule; 130 struct lock pf_consistency_lock; 131 #ifdef ALTQ 132 static int pf_altq_running; 133 #endif 134 135 #define TAGID_MAX 50000 136 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 137 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 138 139 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 140 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 141 #endif 142 u_int16_t tagname2tag(struct pf_tags *, char *); 143 void tag2tagname(struct pf_tags *, u_int16_t, char *); 144 void tag_unref(struct pf_tags *, u_int16_t); 145 int pf_rtlabel_add(struct pf_addr_wrap *); 146 void pf_rtlabel_remove(struct pf_addr_wrap *); 147 void pf_rtlabel_copyout(struct pf_addr_wrap *); 148 149 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x 150 151 static cdev_t pf_dev; 152 153 /* 154 * XXX - These are new and need to be checked when moveing to a new version 155 */ 156 static void pf_clear_states(void); 157 static int pf_clear_tables(void); 158 static void pf_clear_srcnodes(void); 159 /* 160 * XXX - These are new and need to be checked when moveing to a new version 161 */ 162 163 /* 164 * Wrapper functions for pfil(9) hooks 165 */ 166 static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, 167 int dir); 168 static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, 169 int dir); 170 #ifdef INET6 171 static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, 172 int dir); 173 static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet 
*ifp,
		int dir);
#endif

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static int		 pf_unload(void);

d_open_t	pfopen;
d_close_t	pfclose;
d_ioctl_t	pfioctl;

/* Character-device entry points for /dev/pf. */
static struct dev_ops pf_ops = {	/* XXX convert to port model */
	{ PF_NAME, 73, 0 },
	.d_open = pfopen,
	.d_close = pfclose,
	.d_ioctl = pfioctl
};

static volatile int pf_pfil_hooked = 0;
int pf_end_threads = 0;
struct lock pf_mod_lck;

int debug_pfugidhack = 0;
SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
    "Enable/disable pf user/group rules mpsafe hack");

/*
 * Reset every pf zone (pool) pointer to NULL so a partially failed
 * pfattach() can be detected and torn down safely.
 * NOTE(review): pfr_kentry_pl2 and pfi_addr_pl are not reset here even
 * though cleanup_pf_zone() destroys them -- confirm they are zeroed
 * elsewhere before first use.
 */
void
init_zone_var(void)
{
	pf_src_tree_pl = pf_rule_pl = NULL;
	pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
	pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
	pf_state_scrub_pl = NULL;
	pfr_ktable_pl = pfr_kentry_pl = NULL;
}

/* Destroy every zone created by pfattach(). */
void
cleanup_pf_zone(void)
{
	ZONE_DESTROY(pf_src_tree_pl);
	ZONE_DESTROY(pf_rule_pl);
	ZONE_DESTROY(pf_state_pl);
	ZONE_DESTROY(pf_altq_pl);
	ZONE_DESTROY(pf_pooladdr_pl);
	ZONE_DESTROY(pf_frent_pl);
	ZONE_DESTROY(pf_frag_pl);
	ZONE_DESTROY(pf_cache_pl);
	ZONE_DESTROY(pf_cent_pl);
	ZONE_DESTROY(pfr_ktable_pl);
	ZONE_DESTROY(pfr_kentry_pl);
	ZONE_DESTROY(pfr_kentry_pl2);
	ZONE_DESTROY(pf_state_scrub_pl);
	ZONE_DESTROY(pfi_addr_pl);
}

/*
 * One-time pf initialization: create the memory zones, initialize the
 * table/interface/osfp subsystems, install the default rule and default
 * timeouts, and start the purge kthread.  Returns 0 on success.
 */
int
pfattach(void)
{
	u_int32_t *my_timeout = pf_default_rule.timeout;
	int error = 1;

	do {
		ZONE_CREATE(pf_src_tree_pl, struct pf_src_node, "pfsrctrpl");
		ZONE_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl");
		ZONE_CREATE(pf_state_pl, struct pf_state, "pfstatepl");
		ZONE_CREATE(pf_state_key_pl, struct pf_state_key, "pfstatekeypl");
		ZONE_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl");
		ZONE_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
		ZONE_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
		ZONE_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
		ZONE_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2");
		ZONE_CREATE(pf_frent_pl, struct pf_frent, "pffrent");
		ZONE_CREATE(pf_frag_pl, struct pf_fragment, "pffrag");
		ZONE_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache");
		ZONE_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent");
		ZONE_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
		    "pfstatescrub");
		ZONE_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
		error = 0;
	} while(0);
	if (error) {
		cleanup_pf_zone();
		return (error);
	}
	pfr_initialize();
	pfi_initialize();
	error = pf_osfp_initialize();
	if (error) {
		cleanup_pf_zone();
		pf_osfp_cleanup();
		return (error);
	}

	/* Hard limits enforced via DIOCSETLIMIT. */
	pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
	pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
	pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	/* XXX uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
		pf_pool_limits[PF_LIMIT_STATES].limit);
	*/
	/* Shrink the table-entry limit on boxes with <= 100MB of RAM. */
	if (ctob(physmem) <= 100*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (uint32_t)(-1);
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = 120;	/* First TCP packet */
	my_timeout[PFTM_TCP_OPENING] = 30;		/* No
response yet */
	my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;	/* Established */
	my_timeout[PFTM_TCP_CLOSING] = 15 * 60;		/* Half closed */
	my_timeout[PFTM_TCP_FIN_WAIT] = 45;		/* Got both FINs */
	my_timeout[PFTM_TCP_CLOSED] = 90;		/* Got a RST */
	my_timeout[PFTM_UDP_FIRST_PACKET] = 60;		/* First UDP packet */
	my_timeout[PFTM_UDP_SINGLE] = 30;		/* Unidirectional */
	my_timeout[PFTM_UDP_MULTIPLE] = 60;		/* Bidirectional */
	my_timeout[PFTM_ICMP_FIRST_PACKET] = 20;	/* First ICMP packet */
	my_timeout[PFTM_ICMP_ERROR_REPLY] = 10;		/* Got error response */
	my_timeout[PFTM_OTHER_FIRST_PACKET] = 60;	/* First packet */
	my_timeout[PFTM_OTHER_SINGLE] = 30;		/* Unidirectional */
	my_timeout[PFTM_OTHER_MULTIPLE] = 60;		/* Bidirectional */
	my_timeout[PFTM_FRAG] = 30;			/* Fragment expire */
	my_timeout[PFTM_INTERVAL] = 10;			/* Expire interval */
	my_timeout[PFTM_SRC_NODE] = 0;			/* Source Tracking */
	my_timeout[PFTM_TS_DIFF] = 30;			/* Allowed TS diff */
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = karc4random();

	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");

	return (error);
}

/* /dev/pf open: only minor 0 exists; anything else is ENXIO. */
int
pfopen(struct dev_open_args *ap)
{
	lwkt_gettoken(&pf_token);
	cdev_t dev = ap->a_head.a_dev;
	if (minor(dev) >= 1) {
		lwkt_reltoken(&pf_token);
		return (ENXIO);
	}
	lwkt_reltoken(&pf_token);
	return (0);
}

/* /dev/pf close: mirror of pfopen(). */
int
pfclose(struct dev_close_args *ap)
{
	lwkt_gettoken(&pf_token);
	cdev_t dev = ap->a_head.a_dev;
	if (minor(dev) >= 1) {
		lwkt_reltoken(&pf_token);
		return (ENXIO);
	}
	lwkt_reltoken(&pf_token);
	return (0);
}

/*
 * Look up the address pool of the rule identified by (anchor, action,
 * rule number) in either the active or inactive ruleset.  When r_last
 * is set the last rule in the queue is used regardless of rule_number;
 * when check_ticket is set the caller's ticket must match.  Returns
 * NULL when no matching rule exists.
 */
struct pf_pool *
pf_get_pool(char
*anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	/* Unless the caller asked for the last rule, seek to rule_number. */
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

/* Move every pool address from poola to the tail of poolb, keeping order. */
void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

/* Release every pool address in poola (dynaddr/table/kif refs) and free it. */
void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

/*
 * Unlink rule from rulequeue (when rulequeue != NULL) and free it once
 * no states, source nodes or queue membership still reference it.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if
(rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Still referenced by states/src nodes, or still queued: defer free. */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	/* Tables were already detached above when rulequeue was given. */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

/*
 * Map a tag name to a numeric tag id, taking a reference.  Allocates the
 * lowest free id when the name is new; returns 0 when no id <= TAGID_MAX
 * is available.
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = kmalloc(sizeof(struct pf_tagname), M_TEMP, M_WAITOK);
	/* NOTE(review): kmalloc(M_WAITOK) cannot fail; this check is dead. */
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/* Copy the name of tag id tagid into p; p is untouched for unknown ids. */
void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/* Drop one reference on tag; free the entry when the count reaches zero. */
void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				kfree(p, M_TEMP);
			}
			break;
		}
	}
}

/* Convenience wrappers operating on the global pf_tags list. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

/* Take an extra reference on an already allocated tag id. */
void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

/* Route-label support is stubbed out in this port: add always succeeds. */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
	/* Route-label support is stubbed out in this port: nothing to do. */
}

/* Report a placeholder name for route labels (not resolvable here). */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
		strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
}

#ifdef ALTQ
/*
 * Queue names reuse the tag-id allocator on the pf_qids list
 * (PF_QNAME_SIZE == PF_TAG_NAME_SIZE is asserted at compile time).
 */
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

/*
 * Begin an ALTQ transaction: flush the inactive queue list and hand the
 * caller a new ticket in *ticket.  Returns an altq_remove() error, if any.
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

/* Abort an ALTQ transaction: purge the inactive list if the ticket matches. */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

/*
 * Commit an ALTQ transaction: swap the active and inactive lists, attach
 * the new disciplines and tear down the old ones.  Returns EBUSY on a
 * ticket mismatch, otherwise the first attach/detach error encountered.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old.
 */
	crit_enter();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error) {
				crit_exit();
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	crit_exit();

	altqs_inactive_open = 0;
	return (error);
}

/*
 * Enable ALTQ on the interface named in altq and install its token
 * bucket regulator.  Returns EINVAL when the interface does not exist.
 */
int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		crit_enter();
		error = tbr_set(&ifp->if_snd, &tb);
		crit_exit();
	}

	return (error);
}

/*
 * Disable ALTQ on the interface named in altq and clear its token
 * bucket regulator.  Returns EINVAL when the interface does not exist.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		crit_enter();
		error = tbr_set(&ifp->if_snd, &tb);
		crit_exit();
	}

	return (error);
}
#endif /* ALTQ */

/*
 * Begin a ruleset transaction for (anchor, rs_num): flush the inactive
 * rule queue and return a fresh ticket in *ticket that must accompany
 * the subsequent rule additions and the final commit.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort a ruleset transaction: drop the inactive rule queue if the
 * ticket matches an open transaction; silently succeeds otherwise.
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Helpers used by pf_hash_rule*() to fold rule fields into the pfsync
 * ruleset checksum; multi-byte integers are hashed in network order so
 * the checksum is comparable across hosts.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

/* Fold one rule address (only the fields valid for its type) into ctx. */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

/* Fold the checksum-relevant fields of one rule into the MD5 context. */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;	/* scratch for htons conversions */
	u_int32_t	y;	/* scratch for htonl conversions */

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup?
 */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

/*
 * Commit a ruleset transaction: validate the ticket, refresh the pfsync
 * checksum for the main ruleset, swap the inactive queue in as the new
 * active queue and purge the previous rules.  Returns EBUSY on a ticket
 * mismatch or closed transaction.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old.
 */
	crit_enter();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		kfree(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	crit_exit();
	return (0);
}

/*
 * Serialize state s and its key sk into the pfsync wire/export
 * representation sp.  Counters are split into the pfsync 32-bit halves
 * and the expiry time is converted to seconds-remaining.
 */
void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	int secs = time_second;
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	/* Convert absolute expiry time to seconds remaining. */
	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;

}

/*
 * Deserialize the pfsync representation sp into state key sk and state
 * s.  Rule/anchor pointers are reset to safe defaults; counters are
 * zeroed and the creation time restarted locally.
 */
void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	/*
	 * NOTE(review): this copies FROM s->kif INTO sp->ifname -- the
	 * opposite direction of every other assignment in this importer,
	 * and s->kif may not be initialized yet at import time.  Looks
	 * copy-pasted from pf_state_export(); verify against callers.
	 */
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = time_second;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}

/*
 * Recompute the pfsync ruleset checksum over all inactive rule queues
 * (except scrub) and rebuild the per-queue rule pointer arrays indexed
 * by rule number.  Returns ENOMEM if an array cannot be allocated.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX
PF_RULESET_SCRUB as well? */ 1041 if (rs_cnt == PF_RULESET_SCRUB) 1042 continue; 1043 1044 if (rs->rules[rs_cnt].inactive.ptr_array) 1045 kfree(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); 1046 rs->rules[rs_cnt].inactive.ptr_array = NULL; 1047 1048 if (rs->rules[rs_cnt].inactive.rcount) { 1049 rs->rules[rs_cnt].inactive.ptr_array = 1050 kmalloc(sizeof(caddr_t) * 1051 rs->rules[rs_cnt].inactive.rcount, 1052 M_TEMP, M_WAITOK); 1053 1054 if (!rs->rules[rs_cnt].inactive.ptr_array) 1055 return (ENOMEM); 1056 } 1057 1058 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, 1059 entries) { 1060 pf_hash_rule(&ctx, rule); 1061 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; 1062 } 1063 } 1064 1065 MD5Final(digest, &ctx); 1066 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum)); 1067 return (0); 1068 } 1069 1070 int 1071 pfioctl(struct dev_ioctl_args *ap) 1072 { 1073 u_long cmd = ap->a_cmd; 1074 caddr_t addr = ap->a_data; 1075 struct pf_pooladdr *pa = NULL; 1076 struct pf_pool *pool = NULL; 1077 int error = 0; 1078 1079 lwkt_gettoken(&pf_token); 1080 1081 /* XXX keep in sync with switch() below */ 1082 if (securelevel > 1) 1083 switch (cmd) { 1084 case DIOCGETRULES: 1085 case DIOCGETRULE: 1086 case DIOCGETADDRS: 1087 case DIOCGETADDR: 1088 case DIOCGETSTATE: 1089 case DIOCSETSTATUSIF: 1090 case DIOCGETSTATUS: 1091 case DIOCCLRSTATUS: 1092 case DIOCNATLOOK: 1093 case DIOCSETDEBUG: 1094 case DIOCGETSTATES: 1095 case DIOCGETTIMEOUT: 1096 case DIOCCLRRULECTRS: 1097 case DIOCGETLIMIT: 1098 case DIOCGETALTQS: 1099 case DIOCGETALTQ: 1100 case DIOCGETQSTATS: 1101 case DIOCGETRULESETS: 1102 case DIOCGETRULESET: 1103 case DIOCRGETTABLES: 1104 case DIOCRGETTSTATS: 1105 case DIOCRCLRTSTATS: 1106 case DIOCRCLRADDRS: 1107 case DIOCRADDADDRS: 1108 case DIOCRDELADDRS: 1109 case DIOCRSETADDRS: 1110 case DIOCRGETADDRS: 1111 case DIOCRGETASTATS: 1112 case DIOCRCLRASTATS: 1113 case DIOCRTSTADDRS: 1114 case DIOCOSFPGET: 1115 case DIOCGETSRCNODES: 1116 case DIOCCLRSRCNODES: 
1117 case DIOCIGETIFACES: 1118 case DIOCSETIFFLAG: 1119 case DIOCCLRIFFLAG: 1120 case DIOCGIFSPEED: 1121 break; 1122 case DIOCRCLRTABLES: 1123 case DIOCRADDTABLES: 1124 case DIOCRDELTABLES: 1125 case DIOCRSETTFLAGS: 1126 if (((struct pfioc_table *)addr)->pfrio_flags & 1127 PFR_FLAG_DUMMY) 1128 break; /* dummy operation ok */ 1129 lwkt_reltoken(&pf_token); 1130 return (EPERM); 1131 default: 1132 lwkt_reltoken(&pf_token); 1133 return (EPERM); 1134 } 1135 1136 if (!(ap->a_fflag & FWRITE)) 1137 switch (cmd) { 1138 case DIOCGETRULES: 1139 case DIOCGETADDRS: 1140 case DIOCGETADDR: 1141 case DIOCGETSTATE: 1142 case DIOCGETSTATUS: 1143 case DIOCGETSTATES: 1144 case DIOCGETTIMEOUT: 1145 case DIOCGETLIMIT: 1146 case DIOCGETALTQS: 1147 case DIOCGETALTQ: 1148 case DIOCGETQSTATS: 1149 case DIOCGETRULESETS: 1150 case DIOCGETRULESET: 1151 case DIOCNATLOOK: 1152 case DIOCRGETTABLES: 1153 case DIOCRGETTSTATS: 1154 case DIOCRGETADDRS: 1155 case DIOCRGETASTATS: 1156 case DIOCRTSTADDRS: 1157 case DIOCOSFPGET: 1158 case DIOCGETSRCNODES: 1159 case DIOCIGETIFACES: 1160 case DIOCGIFSPEED: 1161 break; 1162 case DIOCRCLRTABLES: 1163 case DIOCRADDTABLES: 1164 case DIOCRDELTABLES: 1165 case DIOCRCLRTSTATS: 1166 case DIOCRCLRADDRS: 1167 case DIOCRADDADDRS: 1168 case DIOCRDELADDRS: 1169 case DIOCRSETADDRS: 1170 case DIOCRSETTFLAGS: 1171 if (((struct pfioc_table *)addr)->pfrio_flags & 1172 PFR_FLAG_DUMMY) 1173 break; /* dummy operation ok */ 1174 lwkt_reltoken(&pf_token); 1175 return (EACCES); 1176 case DIOCGETRULE: 1177 if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR) { 1178 lwkt_reltoken(&pf_token); 1179 return (EACCES); 1180 } 1181 break; 1182 default: 1183 lwkt_reltoken(&pf_token); 1184 return (EACCES); 1185 } 1186 1187 switch (cmd) { 1188 1189 case DIOCSTART: 1190 if (pf_status.running) 1191 error = EEXIST; 1192 else { 1193 error = hook_pf(); 1194 if (error) { 1195 DPFPRINTF(PF_DEBUG_MISC, 1196 ("pf: pfil registration fail\n")); 1197 break; 1198 } 1199 pf_status.running = 1; 1200 
pf_status.since = time_second; 1201 if (pf_status.stateid == 0) { 1202 pf_status.stateid = time_second; 1203 pf_status.stateid = pf_status.stateid << 32; 1204 } 1205 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1206 } 1207 break; 1208 1209 case DIOCSTOP: 1210 if (!pf_status.running) 1211 error = ENOENT; 1212 else { 1213 pf_status.running = 0; 1214 error = dehook_pf(); 1215 if (error) { 1216 pf_status.running = 1; 1217 DPFPRINTF(PF_DEBUG_MISC, 1218 ("pf: pfil unregistration failed\n")); 1219 } 1220 pf_status.since = time_second; 1221 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1222 } 1223 break; 1224 1225 case DIOCADDRULE: { 1226 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1227 struct pf_ruleset *ruleset; 1228 struct pf_rule *rule, *tail; 1229 struct pf_pooladdr *pa; 1230 int rs_num; 1231 1232 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1233 ruleset = pf_find_ruleset(pr->anchor); 1234 if (ruleset == NULL) { 1235 error = EINVAL; 1236 break; 1237 } 1238 rs_num = pf_get_ruleset_number(pr->rule.action); 1239 if (rs_num >= PF_RULESET_MAX) { 1240 error = EINVAL; 1241 break; 1242 } 1243 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1244 error = EINVAL; 1245 break; 1246 } 1247 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1248 error = EBUSY; 1249 break; 1250 } 1251 if (pr->pool_ticket != ticket_pabuf) { 1252 error = EBUSY; 1253 break; 1254 } 1255 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1256 if (rule == NULL) { 1257 error = ENOMEM; 1258 break; 1259 } 1260 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1261 rule->cuid = ap->a_cred->cr_ruid; 1262 rule->cpid = (int)NULL; 1263 rule->anchor = NULL; 1264 rule->kif = NULL; 1265 TAILQ_INIT(&rule->rpool.list); 1266 /* initialize refcounting */ 1267 rule->states = 0; 1268 rule->src_nodes = 0; 1269 rule->entries.tqe_prev = NULL; 1270 #ifndef INET 1271 if (rule->af == AF_INET) { 1272 pool_put(&pf_rule_pl, rule); 1273 error = EAFNOSUPPORT; 1274 break; 1275 } 1276 #endif /* INET */ 1277 #ifndef INET6 1278 if 
(rule->af == AF_INET6) { 1279 pool_put(&pf_rule_pl, rule); 1280 error = EAFNOSUPPORT; 1281 break; 1282 } 1283 #endif /* INET6 */ 1284 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1285 pf_rulequeue); 1286 if (tail) 1287 rule->nr = tail->nr + 1; 1288 else 1289 rule->nr = 0; 1290 if (rule->ifname[0]) { 1291 rule->kif = pfi_kif_get(rule->ifname); 1292 if (rule->kif == NULL) { 1293 pool_put(&pf_rule_pl, rule); 1294 error = EINVAL; 1295 break; 1296 } 1297 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); 1298 } 1299 1300 if (rule->rtableid > 0 && rule->rtableid > rt_numfibs) 1301 error = EBUSY; 1302 1303 #ifdef ALTQ 1304 /* set queue IDs */ 1305 if (rule->qname[0] != 0) { 1306 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1307 error = EBUSY; 1308 else if (rule->pqname[0] != 0) { 1309 if ((rule->pqid = 1310 pf_qname2qid(rule->pqname)) == 0) 1311 error = EBUSY; 1312 } else 1313 rule->pqid = rule->qid; 1314 } 1315 #endif 1316 if (rule->tagname[0]) 1317 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1318 error = EBUSY; 1319 if (rule->match_tagname[0]) 1320 if ((rule->match_tag = 1321 pf_tagname2tag(rule->match_tagname)) == 0) 1322 error = EBUSY; 1323 if (rule->rt && !rule->direction) 1324 error = EINVAL; 1325 #if NPFLOG > 0 1326 if (!rule->log) 1327 rule->logif = 0; 1328 if (rule->logif >= PFLOGIFS_MAX) 1329 error = EINVAL; 1330 #endif 1331 if (pf_rtlabel_add(&rule->src.addr) || 1332 pf_rtlabel_add(&rule->dst.addr)) 1333 error = EBUSY; 1334 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1335 error = EINVAL; 1336 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1337 error = EINVAL; 1338 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1339 error = EINVAL; 1340 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1341 error = EINVAL; 1342 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1343 error = EINVAL; 1344 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1345 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1346 error = EINVAL; 1347 1348 if (rule->overload_tblname[0]) { 
1349 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1350 rule->overload_tblname)) == NULL) 1351 error = EINVAL; 1352 else 1353 rule->overload_tbl->pfrkt_flags |= 1354 PFR_TFLAG_ACTIVE; 1355 } 1356 1357 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1358 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1359 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 1360 (rule->rt > PF_FASTROUTE)) && 1361 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 1362 error = EINVAL; 1363 1364 if (error) { 1365 pf_rm_rule(NULL, rule); 1366 break; 1367 } 1368 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1369 rule->evaluations = rule->packets[0] = rule->packets[1] = 1370 rule->bytes[0] = rule->bytes[1] = 0; 1371 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1372 rule, entries); 1373 ruleset->rules[rs_num].inactive.rcount++; 1374 break; 1375 } 1376 1377 case DIOCGETRULES: { 1378 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1379 struct pf_ruleset *ruleset; 1380 struct pf_rule *tail; 1381 int rs_num; 1382 1383 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1384 ruleset = pf_find_ruleset(pr->anchor); 1385 if (ruleset == NULL) { 1386 error = EINVAL; 1387 break; 1388 } 1389 rs_num = pf_get_ruleset_number(pr->rule.action); 1390 if (rs_num >= PF_RULESET_MAX) { 1391 error = EINVAL; 1392 break; 1393 } 1394 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1395 pf_rulequeue); 1396 if (tail) 1397 pr->nr = tail->nr + 1; 1398 else 1399 pr->nr = 0; 1400 pr->ticket = ruleset->rules[rs_num].active.ticket; 1401 break; 1402 } 1403 1404 case DIOCGETRULE: { 1405 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1406 struct pf_ruleset *ruleset; 1407 struct pf_rule *rule; 1408 int rs_num, i; 1409 1410 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1411 ruleset = pf_find_ruleset(pr->anchor); 1412 if (ruleset == NULL) { 1413 error = EINVAL; 1414 break; 1415 } 1416 rs_num = pf_get_ruleset_number(pr->rule.action); 1417 if (rs_num >= PF_RULESET_MAX) { 1418 error = EINVAL; 1419 break; 1420 } 
1421 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1422 error = EBUSY; 1423 break; 1424 } 1425 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1426 while ((rule != NULL) && (rule->nr != pr->nr)) 1427 rule = TAILQ_NEXT(rule, entries); 1428 if (rule == NULL) { 1429 error = EBUSY; 1430 break; 1431 } 1432 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1433 if (pf_anchor_copyout(ruleset, rule, pr)) { 1434 error = EBUSY; 1435 break; 1436 } 1437 pfi_dynaddr_copyout(&pr->rule.src.addr); 1438 pfi_dynaddr_copyout(&pr->rule.dst.addr); 1439 pf_tbladdr_copyout(&pr->rule.src.addr); 1440 pf_tbladdr_copyout(&pr->rule.dst.addr); 1441 pf_rtlabel_copyout(&pr->rule.src.addr); 1442 pf_rtlabel_copyout(&pr->rule.dst.addr); 1443 for (i = 0; i < PF_SKIP_COUNT; ++i) 1444 if (rule->skip[i].ptr == NULL) 1445 pr->rule.skip[i].nr = (uint32_t)(-1); 1446 else 1447 pr->rule.skip[i].nr = 1448 rule->skip[i].ptr->nr; 1449 1450 if (pr->action == PF_GET_CLR_CNTR) { 1451 rule->evaluations = 0; 1452 rule->packets[0] = rule->packets[1] = 0; 1453 rule->bytes[0] = rule->bytes[1] = 0; 1454 } 1455 break; 1456 } 1457 1458 case DIOCCHANGERULE: { 1459 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1460 struct pf_ruleset *ruleset; 1461 struct pf_rule *oldrule = NULL, *newrule = NULL; 1462 u_int32_t nr = 0; 1463 int rs_num; 1464 1465 if (!(pcr->action == PF_CHANGE_REMOVE || 1466 pcr->action == PF_CHANGE_GET_TICKET) && 1467 pcr->pool_ticket != ticket_pabuf) { 1468 error = EBUSY; 1469 break; 1470 } 1471 1472 if (pcr->action < PF_CHANGE_ADD_HEAD || 1473 pcr->action > PF_CHANGE_GET_TICKET) { 1474 error = EINVAL; 1475 break; 1476 } 1477 ruleset = pf_find_ruleset(pcr->anchor); 1478 if (ruleset == NULL) { 1479 error = EINVAL; 1480 break; 1481 } 1482 rs_num = pf_get_ruleset_number(pcr->rule.action); 1483 if (rs_num >= PF_RULESET_MAX) { 1484 error = EINVAL; 1485 break; 1486 } 1487 1488 if (pcr->action == PF_CHANGE_GET_TICKET) { 1489 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1490 break; 
1491 } else { 1492 if (pcr->ticket != 1493 ruleset->rules[rs_num].active.ticket) { 1494 error = EINVAL; 1495 break; 1496 } 1497 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1498 error = EINVAL; 1499 break; 1500 } 1501 } 1502 1503 if (pcr->action != PF_CHANGE_REMOVE) { 1504 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1505 if (newrule == NULL) { 1506 error = ENOMEM; 1507 break; 1508 } 1509 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1510 newrule->cuid = ap->a_cred->cr_ruid; 1511 newrule->cpid = (int)NULL; 1512 TAILQ_INIT(&newrule->rpool.list); 1513 /* initialize refcounting */ 1514 newrule->states = 0; 1515 newrule->entries.tqe_prev = NULL; 1516 #ifndef INET 1517 if (newrule->af == AF_INET) { 1518 pool_put(&pf_rule_pl, newrule); 1519 error = EAFNOSUPPORT; 1520 break; 1521 } 1522 #endif /* INET */ 1523 #ifndef INET6 1524 if (newrule->af == AF_INET6) { 1525 pool_put(&pf_rule_pl, newrule); 1526 error = EAFNOSUPPORT; 1527 break; 1528 } 1529 #endif /* INET6 */ 1530 if (newrule->ifname[0]) { 1531 newrule->kif = pfi_kif_get(newrule->ifname); 1532 if (newrule->kif == NULL) { 1533 pool_put(&pf_rule_pl, newrule); 1534 error = EINVAL; 1535 break; 1536 } 1537 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); 1538 } else 1539 newrule->kif = NULL; 1540 1541 if (newrule->rtableid > 0 && 1542 newrule->rtableid > rt_numfibs) 1543 error = EBUSY; 1544 1545 #ifdef ALTQ 1546 /* set queue IDs */ 1547 if (newrule->qname[0] != 0) { 1548 if ((newrule->qid = 1549 pf_qname2qid(newrule->qname)) == 0) 1550 error = EBUSY; 1551 else if (newrule->pqname[0] != 0) { 1552 if ((newrule->pqid = 1553 pf_qname2qid(newrule->pqname)) == 0) 1554 error = EBUSY; 1555 } else 1556 newrule->pqid = newrule->qid; 1557 } 1558 #endif /* ALTQ */ 1559 if (newrule->tagname[0]) 1560 if ((newrule->tag = 1561 pf_tagname2tag(newrule->tagname)) == 0) 1562 error = EBUSY; 1563 if (newrule->match_tagname[0]) 1564 if ((newrule->match_tag = pf_tagname2tag( 1565 newrule->match_tagname)) == 0) 1566 error = EBUSY; 1567 if 
(newrule->rt && !newrule->direction) 1568 error = EINVAL; 1569 #if NPFLOG > 0 1570 if (!newrule->log) 1571 newrule->logif = 0; 1572 if (newrule->logif >= PFLOGIFS_MAX) 1573 error = EINVAL; 1574 #endif 1575 if (pf_rtlabel_add(&newrule->src.addr) || 1576 pf_rtlabel_add(&newrule->dst.addr)) 1577 error = EBUSY; 1578 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) 1579 error = EINVAL; 1580 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1581 error = EINVAL; 1582 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1583 error = EINVAL; 1584 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1585 error = EINVAL; 1586 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1587 error = EINVAL; 1588 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1589 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1590 error = EINVAL; 1591 1592 if (newrule->overload_tblname[0]) { 1593 if ((newrule->overload_tbl = pfr_attach_table( 1594 ruleset, newrule->overload_tblname)) == 1595 NULL) 1596 error = EINVAL; 1597 else 1598 newrule->overload_tbl->pfrkt_flags |= 1599 PFR_TFLAG_ACTIVE; 1600 } 1601 1602 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1603 if (((((newrule->action == PF_NAT) || 1604 (newrule->action == PF_RDR) || 1605 (newrule->action == PF_BINAT) || 1606 (newrule->rt > PF_FASTROUTE)) && 1607 !newrule->anchor)) && 1608 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1609 error = EINVAL; 1610 1611 if (error) { 1612 pf_rm_rule(NULL, newrule); 1613 break; 1614 } 1615 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1616 newrule->evaluations = 0; 1617 newrule->packets[0] = newrule->packets[1] = 0; 1618 newrule->bytes[0] = newrule->bytes[1] = 0; 1619 } 1620 pf_empty_pool(&pf_pabuf); 1621 1622 if (pcr->action == PF_CHANGE_ADD_HEAD) 1623 oldrule = TAILQ_FIRST( 1624 ruleset->rules[rs_num].active.ptr); 1625 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1626 oldrule = TAILQ_LAST( 1627 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1628 else { 1629 oldrule = TAILQ_FIRST( 1630 
ruleset->rules[rs_num].active.ptr); 1631 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1632 oldrule = TAILQ_NEXT(oldrule, entries); 1633 if (oldrule == NULL) { 1634 if (newrule != NULL) 1635 pf_rm_rule(NULL, newrule); 1636 error = EINVAL; 1637 break; 1638 } 1639 } 1640 1641 if (pcr->action == PF_CHANGE_REMOVE) { 1642 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1643 ruleset->rules[rs_num].active.rcount--; 1644 } else { 1645 if (oldrule == NULL) 1646 TAILQ_INSERT_TAIL( 1647 ruleset->rules[rs_num].active.ptr, 1648 newrule, entries); 1649 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1650 pcr->action == PF_CHANGE_ADD_BEFORE) 1651 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1652 else 1653 TAILQ_INSERT_AFTER( 1654 ruleset->rules[rs_num].active.ptr, 1655 oldrule, newrule, entries); 1656 ruleset->rules[rs_num].active.rcount++; 1657 } 1658 1659 nr = 0; 1660 TAILQ_FOREACH(oldrule, 1661 ruleset->rules[rs_num].active.ptr, entries) 1662 oldrule->nr = nr++; 1663 1664 ruleset->rules[rs_num].active.ticket++; 1665 1666 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1667 pf_remove_if_empty_ruleset(ruleset); 1668 1669 break; 1670 } 1671 1672 case DIOCCLRSTATES: { 1673 struct pf_state *s, *nexts; 1674 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1675 int killed = 0; 1676 1677 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { 1678 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1679 1680 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1681 s->kif->pfik_name)) { 1682 #if NPFSYNC 1683 /* don't send out individual delete messages */ 1684 s->sync_flags = PFSTATE_NOSYNC; 1685 #endif 1686 pf_unlink_state(s); 1687 killed++; 1688 } 1689 } 1690 psk->psk_af = killed; 1691 #if NPFSYNC 1692 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1693 #endif 1694 break; 1695 } 1696 1697 case DIOCKILLSTATES: { 1698 struct pf_state *s, *nexts; 1699 struct pf_state_key *sk; 1700 struct pf_state_host *src, *dst; 1701 struct 
pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1702 int killed = 0; 1703 1704 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; 1705 s = nexts) { 1706 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1707 sk = s->state_key; 1708 1709 if (sk->direction == PF_OUT) { 1710 src = &sk->lan; 1711 dst = &sk->ext; 1712 } else { 1713 src = &sk->ext; 1714 dst = &sk->lan; 1715 } 1716 if ((!psk->psk_af || sk->af == psk->psk_af) 1717 && (!psk->psk_proto || psk->psk_proto == 1718 sk->proto) && 1719 PF_MATCHA(psk->psk_src.neg, 1720 &psk->psk_src.addr.v.a.addr, 1721 &psk->psk_src.addr.v.a.mask, 1722 &src->addr, sk->af) && 1723 PF_MATCHA(psk->psk_dst.neg, 1724 &psk->psk_dst.addr.v.a.addr, 1725 &psk->psk_dst.addr.v.a.mask, 1726 &dst->addr, sk->af) && 1727 (psk->psk_src.port_op == 0 || 1728 pf_match_port(psk->psk_src.port_op, 1729 psk->psk_src.port[0], psk->psk_src.port[1], 1730 src->port)) && 1731 (psk->psk_dst.port_op == 0 || 1732 pf_match_port(psk->psk_dst.port_op, 1733 psk->psk_dst.port[0], psk->psk_dst.port[1], 1734 dst->port)) && 1735 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1736 s->kif->pfik_name))) { 1737 #if NPFSYNC > 0 1738 /* send immediate delete of state */ 1739 pfsync_delete_state(s); 1740 s->sync_flags |= PFSTATE_NOSYNC; 1741 #endif 1742 pf_unlink_state(s); 1743 killed++; 1744 } 1745 } 1746 psk->psk_af = killed; 1747 break; 1748 } 1749 1750 case DIOCADDSTATE: { 1751 struct pfioc_state *ps = (struct pfioc_state *)addr; 1752 struct pfsync_state *sp = (struct pfsync_state *)ps->state; 1753 struct pf_state *s; 1754 struct pf_state_key *sk; 1755 struct pfi_kif *kif; 1756 1757 if (sp->timeout >= PFTM_MAX && 1758 sp->timeout != PFTM_UNTIL_PACKET) { 1759 error = EINVAL; 1760 break; 1761 } 1762 s = pool_get(&pf_state_pl, PR_NOWAIT); 1763 if (s == NULL) { 1764 error = ENOMEM; 1765 break; 1766 } 1767 bzero(s, sizeof(struct pf_state)); 1768 if ((sk = pf_alloc_state_key(s)) == NULL) { 1769 error = ENOMEM; 1770 break; 1771 } 1772 pf_state_import(sp, sk, s); 1773 kif = 
pfi_kif_get(sp->ifname); 1774 if (kif == NULL) { 1775 pool_put(&pf_state_pl, s); 1776 pool_put(&pf_state_key_pl, sk); 1777 error = ENOENT; 1778 break; 1779 } 1780 if (pf_insert_state(kif, s)) { 1781 pfi_kif_unref(kif, PFI_KIF_REF_NONE); 1782 pool_put(&pf_state_pl, s); 1783 pool_put(&pf_state_key_pl, sk); 1784 error = ENOMEM; 1785 } 1786 break; 1787 } 1788 1789 case DIOCGETSTATE: { 1790 struct pfioc_state *ps = (struct pfioc_state *)addr; 1791 struct pf_state *s; 1792 u_int32_t nr; 1793 1794 nr = 0; 1795 RB_FOREACH(s, pf_state_tree_id, &tree_id) { 1796 if (nr >= ps->nr) 1797 break; 1798 nr++; 1799 } 1800 if (s == NULL) { 1801 error = EBUSY; 1802 break; 1803 } 1804 1805 pf_state_export((struct pfsync_state *)&ps->state, 1806 s->state_key, s); 1807 break; 1808 } 1809 1810 case DIOCGETSTATES: { 1811 struct pfioc_states *ps = (struct pfioc_states *)addr; 1812 struct pf_state *state; 1813 struct pfsync_state *p, *pstore; 1814 u_int32_t nr = 0; 1815 1816 if (ps->ps_len == 0) { 1817 nr = pf_status.states; 1818 ps->ps_len = sizeof(struct pfsync_state) * nr; 1819 break; 1820 } 1821 1822 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK); 1823 1824 p = ps->ps_states; 1825 1826 state = TAILQ_FIRST(&state_list); 1827 while (state) { 1828 if (state->timeout != PFTM_UNLINKED) { 1829 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 1830 break; 1831 1832 pf_state_export(pstore, 1833 state->state_key, state); 1834 error = copyout(pstore, p, sizeof(*p)); 1835 if (error) { 1836 kfree(pstore, M_TEMP); 1837 goto fail; 1838 } 1839 p++; 1840 nr++; 1841 } 1842 state = TAILQ_NEXT(state, entry_list); 1843 } 1844 1845 ps->ps_len = sizeof(struct pfsync_state) * nr; 1846 1847 kfree(pstore, M_TEMP); 1848 break; 1849 } 1850 1851 case DIOCGETSTATUS: { 1852 struct pf_status *s = (struct pf_status *)addr; 1853 bcopy(&pf_status, s, sizeof(struct pf_status)); 1854 pfi_fill_oldstatus(s); 1855 break; 1856 } 1857 1858 case DIOCSETSTATUSIF: { 1859 struct pfioc_if *pi = (struct pfioc_if *)addr; 1860 1861 
if (pi->ifname[0] == 0) { 1862 bzero(pf_status.ifname, IFNAMSIZ); 1863 break; 1864 } 1865 if (ifunit(pi->ifname) == NULL) { 1866 error = EINVAL; 1867 break; 1868 } 1869 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 1870 break; 1871 } 1872 1873 case DIOCCLRSTATUS: { 1874 bzero(pf_status.counters, sizeof(pf_status.counters)); 1875 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 1876 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 1877 pf_status.since = time_second; 1878 if (*pf_status.ifname) 1879 pfi_clr_istats(pf_status.ifname); 1880 break; 1881 } 1882 1883 case DIOCNATLOOK: { 1884 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1885 struct pf_state_key *sk; 1886 struct pf_state *state; 1887 struct pf_state_key_cmp key; 1888 int m = 0, direction = pnl->direction; 1889 1890 key.af = pnl->af; 1891 key.proto = pnl->proto; 1892 1893 if (!pnl->proto || 1894 PF_AZERO(&pnl->saddr, pnl->af) || 1895 PF_AZERO(&pnl->daddr, pnl->af) || 1896 ((pnl->proto == IPPROTO_TCP || 1897 pnl->proto == IPPROTO_UDP) && 1898 (!pnl->dport || !pnl->sport))) 1899 error = EINVAL; 1900 else { 1901 /* 1902 * userland gives us source and dest of connection, 1903 * reverse the lookup so we ask for what happens with 1904 * the return traffic, enabling us to find it in the 1905 * state tree. 
1906 */ 1907 if (direction == PF_IN) { 1908 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 1909 key.ext.port = pnl->dport; 1910 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 1911 key.gwy.port = pnl->sport; 1912 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 1913 } else { 1914 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 1915 key.lan.port = pnl->dport; 1916 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 1917 key.ext.port = pnl->sport; 1918 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 1919 } 1920 if (m > 1) 1921 error = E2BIG; /* more than one state */ 1922 else if (state != NULL) { 1923 sk = state->state_key; 1924 if (direction == PF_IN) { 1925 PF_ACPY(&pnl->rsaddr, &sk->lan.addr, 1926 sk->af); 1927 pnl->rsport = sk->lan.port; 1928 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 1929 pnl->af); 1930 pnl->rdport = pnl->dport; 1931 } else { 1932 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr, 1933 sk->af); 1934 pnl->rdport = sk->gwy.port; 1935 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 1936 pnl->af); 1937 pnl->rsport = pnl->sport; 1938 } 1939 } else 1940 error = ENOENT; 1941 } 1942 break; 1943 } 1944 1945 case DIOCSETTIMEOUT: { 1946 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1947 int old; 1948 1949 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1950 pt->seconds < 0) { 1951 error = EINVAL; 1952 goto fail; 1953 } 1954 old = pf_default_rule.timeout[pt->timeout]; 1955 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 1956 pt->seconds = 1; 1957 pf_default_rule.timeout[pt->timeout] = pt->seconds; 1958 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 1959 wakeup(pf_purge_thread); 1960 pt->seconds = old; 1961 break; 1962 } 1963 1964 case DIOCGETTIMEOUT: { 1965 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1966 1967 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1968 error = EINVAL; 1969 goto fail; 1970 } 1971 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1972 break; 1973 } 1974 1975 case DIOCGETLIMIT: { 1976 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1977 1978 if 
(pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1979 error = EINVAL; 1980 goto fail; 1981 } 1982 pl->limit = pf_pool_limits[pl->index].limit; 1983 break; 1984 } 1985 1986 case DIOCSETLIMIT: { 1987 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1988 int old_limit; 1989 1990 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1991 pf_pool_limits[pl->index].pp == NULL) { 1992 error = EINVAL; 1993 goto fail; 1994 } 1995 1996 /* XXX Get an API to set limits on the zone/pool */ 1997 old_limit = pf_pool_limits[pl->index].limit; 1998 pf_pool_limits[pl->index].limit = pl->limit; 1999 pl->limit = old_limit; 2000 break; 2001 } 2002 2003 case DIOCSETDEBUG: { 2004 u_int32_t *level = (u_int32_t *)addr; 2005 2006 pf_status.debug = *level; 2007 break; 2008 } 2009 2010 case DIOCCLRRULECTRS: { 2011 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 2012 struct pf_ruleset *ruleset = &pf_main_ruleset; 2013 struct pf_rule *rule; 2014 2015 TAILQ_FOREACH(rule, 2016 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 2017 rule->evaluations = 0; 2018 rule->packets[0] = rule->packets[1] = 0; 2019 rule->bytes[0] = rule->bytes[1] = 0; 2020 } 2021 break; 2022 } 2023 2024 case DIOCGIFSPEED: { 2025 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 2026 struct pf_ifspeed ps; 2027 struct ifnet *ifp; 2028 2029 if (psp->ifname[0] != 0) { 2030 /* Can we completely trust user-land? 
*/ 2031 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 2032 ifp = ifunit(ps.ifname); 2033 if (ifp ) 2034 psp->baudrate = ifp->if_baudrate; 2035 else 2036 error = EINVAL; 2037 } else 2038 error = EINVAL; 2039 break; 2040 } 2041 #ifdef ALTQ 2042 case DIOCSTARTALTQ: { 2043 struct pf_altq *altq; 2044 2045 /* enable all altq interfaces on active list */ 2046 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2047 if (altq->qname[0] == 0) { 2048 error = pf_enable_altq(altq); 2049 if (error != 0) 2050 break; 2051 } 2052 } 2053 if (error == 0) 2054 pf_altq_running = 1; 2055 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 2056 break; 2057 } 2058 2059 case DIOCSTOPALTQ: { 2060 struct pf_altq *altq; 2061 2062 /* disable all altq interfaces on active list */ 2063 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 2064 if (altq->qname[0] == 0) { 2065 error = pf_disable_altq(altq); 2066 if (error != 0) 2067 break; 2068 } 2069 } 2070 if (error == 0) 2071 pf_altq_running = 0; 2072 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 2073 break; 2074 } 2075 2076 case DIOCADDALTQ: { 2077 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2078 struct pf_altq *altq, *a; 2079 2080 if (pa->ticket != ticket_altqs_inactive) { 2081 error = EBUSY; 2082 break; 2083 } 2084 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 2085 if (altq == NULL) { 2086 error = ENOMEM; 2087 break; 2088 } 2089 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 2090 2091 /* 2092 * if this is for a queue, find the discipline and 2093 * copy the necessary fields 2094 */ 2095 if (altq->qname[0] != 0) { 2096 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 2097 error = EBUSY; 2098 pool_put(&pf_altq_pl, altq); 2099 break; 2100 } 2101 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 2102 if (strncmp(a->ifname, altq->ifname, 2103 IFNAMSIZ) == 0 && a->qname[0] == 0) { 2104 altq->altq_disc = a->altq_disc; 2105 break; 2106 } 2107 } 2108 } 2109 2110 error = altq_add(altq); 2111 if (error) { 2112 pool_put(&pf_altq_pl, altq); 2113 break; 2114 } 2115 
2116 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 2117 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2118 break; 2119 } 2120 2121 case DIOCGETALTQS: { 2122 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2123 struct pf_altq *altq; 2124 2125 pa->nr = 0; 2126 TAILQ_FOREACH(altq, pf_altqs_active, entries) 2127 pa->nr++; 2128 pa->ticket = ticket_altqs_active; 2129 break; 2130 } 2131 2132 case DIOCGETALTQ: { 2133 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 2134 struct pf_altq *altq; 2135 u_int32_t nr; 2136 2137 if (pa->ticket != ticket_altqs_active) { 2138 error = EBUSY; 2139 break; 2140 } 2141 nr = 0; 2142 altq = TAILQ_FIRST(pf_altqs_active); 2143 while ((altq != NULL) && (nr < pa->nr)) { 2144 altq = TAILQ_NEXT(altq, entries); 2145 nr++; 2146 } 2147 if (altq == NULL) { 2148 error = EBUSY; 2149 break; 2150 } 2151 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 2152 break; 2153 } 2154 2155 case DIOCCHANGEALTQ: 2156 /* CHANGEALTQ not supported yet! */ 2157 error = ENODEV; 2158 break; 2159 2160 case DIOCGETQSTATS: { 2161 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2162 struct pf_altq *altq; 2163 u_int32_t nr; 2164 int nbytes; 2165 2166 if (pq->ticket != ticket_altqs_active) { 2167 error = EBUSY; 2168 break; 2169 } 2170 nbytes = pq->nbytes; 2171 nr = 0; 2172 altq = TAILQ_FIRST(pf_altqs_active); 2173 while ((altq != NULL) && (nr < pq->nr)) { 2174 altq = TAILQ_NEXT(altq, entries); 2175 nr++; 2176 } 2177 if (altq == NULL) { 2178 error = EBUSY; 2179 break; 2180 } 2181 error = altq_getqstats(altq, pq->buf, &nbytes); 2182 if (error == 0) { 2183 pq->scheduler = altq->scheduler; 2184 pq->nbytes = nbytes; 2185 } 2186 break; 2187 } 2188 #endif /* ALTQ */ 2189 2190 case DIOCBEGINADDRS: { 2191 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2192 2193 pf_empty_pool(&pf_pabuf); 2194 pp->ticket = ++ticket_pabuf; 2195 break; 2196 } 2197 2198 case DIOCADDADDR: { 2199 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2200 2201 if (pp->ticket 
!= ticket_pabuf) { 2202 error = EBUSY; 2203 break; 2204 } 2205 #ifndef INET 2206 if (pp->af == AF_INET) { 2207 error = EAFNOSUPPORT; 2208 break; 2209 } 2210 #endif /* INET */ 2211 #ifndef INET6 2212 if (pp->af == AF_INET6) { 2213 error = EAFNOSUPPORT; 2214 break; 2215 } 2216 #endif /* INET6 */ 2217 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2218 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2219 pp->addr.addr.type != PF_ADDR_TABLE) { 2220 error = EINVAL; 2221 break; 2222 } 2223 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2224 if (pa == NULL) { 2225 error = ENOMEM; 2226 break; 2227 } 2228 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2229 if (pa->ifname[0]) { 2230 pa->kif = pfi_kif_get(pa->ifname); 2231 if (pa->kif == NULL) { 2232 pool_put(&pf_pooladdr_pl, pa); 2233 error = EINVAL; 2234 break; 2235 } 2236 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); 2237 } 2238 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2239 pfi_dynaddr_remove(&pa->addr); 2240 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); 2241 pool_put(&pf_pooladdr_pl, pa); 2242 error = EINVAL; 2243 break; 2244 } 2245 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2246 break; 2247 } 2248 2249 case DIOCGETADDRS: { 2250 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2251 2252 pp->nr = 0; 2253 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2254 pp->r_num, 0, 1, 0); 2255 if (pool == NULL) { 2256 error = EBUSY; 2257 break; 2258 } 2259 TAILQ_FOREACH(pa, &pool->list, entries) 2260 pp->nr++; 2261 break; 2262 } 2263 2264 case DIOCGETADDR: { 2265 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2266 u_int32_t nr = 0; 2267 2268 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2269 pp->r_num, 0, 1, 1); 2270 if (pool == NULL) { 2271 error = EBUSY; 2272 break; 2273 } 2274 pa = TAILQ_FIRST(&pool->list); 2275 while ((pa != NULL) && (nr < pp->nr)) { 2276 pa = TAILQ_NEXT(pa, entries); 2277 nr++; 2278 } 2279 if (pa == NULL) { 2280 error = EBUSY; 2281 break; 2282 } 2283 bcopy(pa, &pp->addr, sizeof(struct 
pf_pooladdr)); 2284 pfi_dynaddr_copyout(&pp->addr.addr); 2285 pf_tbladdr_copyout(&pp->addr.addr); 2286 pf_rtlabel_copyout(&pp->addr.addr); 2287 break; 2288 } 2289 2290 case DIOCCHANGEADDR: { 2291 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2292 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2293 struct pf_ruleset *ruleset; 2294 2295 if (pca->action < PF_CHANGE_ADD_HEAD || 2296 pca->action > PF_CHANGE_REMOVE) { 2297 error = EINVAL; 2298 break; 2299 } 2300 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2301 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2302 pca->addr.addr.type != PF_ADDR_TABLE) { 2303 error = EINVAL; 2304 break; 2305 } 2306 2307 ruleset = pf_find_ruleset(pca->anchor); 2308 if (ruleset == NULL) { 2309 error = EBUSY; 2310 break; 2311 } 2312 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2313 pca->r_num, pca->r_last, 1, 1); 2314 if (pool == NULL) { 2315 error = EBUSY; 2316 break; 2317 } 2318 if (pca->action != PF_CHANGE_REMOVE) { 2319 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2320 if (newpa == NULL) { 2321 error = ENOMEM; 2322 break; 2323 } 2324 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2325 #ifndef INET 2326 if (pca->af == AF_INET) { 2327 pool_put(&pf_pooladdr_pl, newpa); 2328 error = EAFNOSUPPORT; 2329 break; 2330 } 2331 #endif /* INET */ 2332 #ifndef INET6 2333 if (pca->af == AF_INET6) { 2334 pool_put(&pf_pooladdr_pl, newpa); 2335 error = EAFNOSUPPORT; 2336 break; 2337 } 2338 #endif /* INET6 */ 2339 if (newpa->ifname[0]) { 2340 newpa->kif = pfi_kif_get(newpa->ifname); 2341 if (newpa->kif == NULL) { 2342 pool_put(&pf_pooladdr_pl, newpa); 2343 error = EINVAL; 2344 break; 2345 } 2346 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); 2347 } else 2348 newpa->kif = NULL; 2349 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2350 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2351 pfi_dynaddr_remove(&newpa->addr); 2352 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); 2353 pool_put(&pf_pooladdr_pl, newpa); 2354 error = EINVAL; 2355 
break; 2356 } 2357 } 2358 2359 if (pca->action == PF_CHANGE_ADD_HEAD) 2360 oldpa = TAILQ_FIRST(&pool->list); 2361 else if (pca->action == PF_CHANGE_ADD_TAIL) 2362 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2363 else { 2364 int i = 0; 2365 2366 oldpa = TAILQ_FIRST(&pool->list); 2367 while ((oldpa != NULL) && (i < pca->nr)) { 2368 oldpa = TAILQ_NEXT(oldpa, entries); 2369 i++; 2370 } 2371 if (oldpa == NULL) { 2372 error = EINVAL; 2373 break; 2374 } 2375 } 2376 2377 if (pca->action == PF_CHANGE_REMOVE) { 2378 TAILQ_REMOVE(&pool->list, oldpa, entries); 2379 pfi_dynaddr_remove(&oldpa->addr); 2380 pf_tbladdr_remove(&oldpa->addr); 2381 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); 2382 pool_put(&pf_pooladdr_pl, oldpa); 2383 } else { 2384 if (oldpa == NULL) 2385 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2386 else if (pca->action == PF_CHANGE_ADD_HEAD || 2387 pca->action == PF_CHANGE_ADD_BEFORE) 2388 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2389 else 2390 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2391 newpa, entries); 2392 } 2393 2394 pool->cur = TAILQ_FIRST(&pool->list); 2395 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2396 pca->af); 2397 break; 2398 } 2399 2400 case DIOCGETRULESETS: { 2401 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2402 struct pf_ruleset *ruleset; 2403 struct pf_anchor *anchor; 2404 2405 pr->path[sizeof(pr->path) - 1] = 0; 2406 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2407 error = EINVAL; 2408 break; 2409 } 2410 pr->nr = 0; 2411 if (ruleset->anchor == NULL) { 2412 /* XXX kludge for pf_main_ruleset */ 2413 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2414 if (anchor->parent == NULL) 2415 pr->nr++; 2416 } else { 2417 RB_FOREACH(anchor, pf_anchor_node, 2418 &ruleset->anchor->children) 2419 pr->nr++; 2420 } 2421 break; 2422 } 2423 2424 case DIOCGETRULESET: { 2425 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2426 struct pf_ruleset *ruleset; 2427 struct pf_anchor *anchor; 2428 u_int32_t nr = 0; 2429 2430 
pr->path[sizeof(pr->path) - 1] = 0; 2431 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2432 error = EINVAL; 2433 break; 2434 } 2435 pr->name[0] = 0; 2436 if (ruleset->anchor == NULL) { 2437 /* XXX kludge for pf_main_ruleset */ 2438 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2439 if (anchor->parent == NULL && nr++ == pr->nr) { 2440 strlcpy(pr->name, anchor->name, 2441 sizeof(pr->name)); 2442 break; 2443 } 2444 } else { 2445 RB_FOREACH(anchor, pf_anchor_node, 2446 &ruleset->anchor->children) 2447 if (nr++ == pr->nr) { 2448 strlcpy(pr->name, anchor->name, 2449 sizeof(pr->name)); 2450 break; 2451 } 2452 } 2453 if (!pr->name[0]) 2454 error = EBUSY; 2455 break; 2456 } 2457 2458 case DIOCRCLRTABLES: { 2459 struct pfioc_table *io = (struct pfioc_table *)addr; 2460 2461 if (io->pfrio_esize != 0) { 2462 error = ENODEV; 2463 break; 2464 } 2465 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2466 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2467 break; 2468 } 2469 2470 case DIOCRADDTABLES: { 2471 struct pfioc_table *io = (struct pfioc_table *)addr; 2472 2473 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2474 error = ENODEV; 2475 break; 2476 } 2477 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2478 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2479 break; 2480 } 2481 2482 case DIOCRDELTABLES: { 2483 struct pfioc_table *io = (struct pfioc_table *)addr; 2484 2485 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2486 error = ENODEV; 2487 break; 2488 } 2489 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2490 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2491 break; 2492 } 2493 2494 case DIOCRGETTABLES: { 2495 struct pfioc_table *io = (struct pfioc_table *)addr; 2496 2497 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2498 error = ENODEV; 2499 break; 2500 } 2501 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2502 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2503 break; 2504 } 2505 2506 case 
DIOCRGETTSTATS: { 2507 struct pfioc_table *io = (struct pfioc_table *)addr; 2508 2509 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2510 error = ENODEV; 2511 break; 2512 } 2513 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2514 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2515 break; 2516 } 2517 2518 case DIOCRCLRTSTATS: { 2519 struct pfioc_table *io = (struct pfioc_table *)addr; 2520 2521 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2522 error = ENODEV; 2523 break; 2524 } 2525 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2526 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2527 break; 2528 } 2529 2530 case DIOCRSETTFLAGS: { 2531 struct pfioc_table *io = (struct pfioc_table *)addr; 2532 2533 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2534 error = ENODEV; 2535 break; 2536 } 2537 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2538 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2539 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2540 break; 2541 } 2542 2543 case DIOCRCLRADDRS: { 2544 struct pfioc_table *io = (struct pfioc_table *)addr; 2545 2546 if (io->pfrio_esize != 0) { 2547 error = ENODEV; 2548 break; 2549 } 2550 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2551 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2552 break; 2553 } 2554 2555 case DIOCRADDADDRS: { 2556 struct pfioc_table *io = (struct pfioc_table *)addr; 2557 2558 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2559 error = ENODEV; 2560 break; 2561 } 2562 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2563 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2564 PFR_FLAG_USERIOCTL); 2565 break; 2566 } 2567 2568 case DIOCRDELADDRS: { 2569 struct pfioc_table *io = (struct pfioc_table *)addr; 2570 2571 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2572 error = ENODEV; 2573 break; 2574 } 2575 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2576 io->pfrio_size, &io->pfrio_ndel, 
io->pfrio_flags | 2577 PFR_FLAG_USERIOCTL); 2578 break; 2579 } 2580 2581 case DIOCRSETADDRS: { 2582 struct pfioc_table *io = (struct pfioc_table *)addr; 2583 2584 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2585 error = ENODEV; 2586 break; 2587 } 2588 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2589 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2590 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2591 PFR_FLAG_USERIOCTL, 0); 2592 break; 2593 } 2594 2595 case DIOCRGETADDRS: { 2596 struct pfioc_table *io = (struct pfioc_table *)addr; 2597 2598 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2599 error = ENODEV; 2600 break; 2601 } 2602 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2603 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2604 break; 2605 } 2606 2607 case DIOCRGETASTATS: { 2608 struct pfioc_table *io = (struct pfioc_table *)addr; 2609 2610 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2611 error = ENODEV; 2612 break; 2613 } 2614 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2615 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2616 break; 2617 } 2618 2619 case DIOCRCLRASTATS: { 2620 struct pfioc_table *io = (struct pfioc_table *)addr; 2621 2622 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2623 error = ENODEV; 2624 break; 2625 } 2626 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2627 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2628 PFR_FLAG_USERIOCTL); 2629 break; 2630 } 2631 2632 case DIOCRTSTADDRS: { 2633 struct pfioc_table *io = (struct pfioc_table *)addr; 2634 2635 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2636 error = ENODEV; 2637 break; 2638 } 2639 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2640 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2641 PFR_FLAG_USERIOCTL); 2642 break; 2643 } 2644 2645 case DIOCRINADEFINE: { 2646 struct pfioc_table *io = (struct pfioc_table *)addr; 2647 2648 if (io->pfrio_esize != 
sizeof(struct pfr_addr)) { 2649 error = ENODEV; 2650 break; 2651 } 2652 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2653 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2654 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2655 break; 2656 } 2657 2658 case DIOCOSFPADD: { 2659 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2660 error = pf_osfp_add(io); 2661 break; 2662 } 2663 2664 case DIOCOSFPGET: { 2665 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2666 error = pf_osfp_get(io); 2667 break; 2668 } 2669 2670 case DIOCXBEGIN: { 2671 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2672 struct pfioc_trans_e *ioe; 2673 struct pfr_table *table; 2674 int i; 2675 2676 if (io->esize != sizeof(*ioe)) { 2677 error = ENODEV; 2678 goto fail; 2679 } 2680 ioe = (struct pfioc_trans_e *)kmalloc(sizeof(*ioe), 2681 M_TEMP, M_WAITOK); 2682 table = (struct pfr_table *)kmalloc(sizeof(*table), 2683 M_TEMP, M_WAITOK); 2684 for (i = 0; i < io->size; i++) { 2685 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2686 kfree(table, M_TEMP); 2687 kfree(ioe, M_TEMP); 2688 error = EFAULT; 2689 goto fail; 2690 } 2691 switch (ioe->rs_num) { 2692 #ifdef ALTQ 2693 case PF_RULESET_ALTQ: 2694 if (ioe->anchor[0]) { 2695 kfree(table, M_TEMP); 2696 kfree(ioe, M_TEMP); 2697 error = EINVAL; 2698 goto fail; 2699 } 2700 if ((error = pf_begin_altq(&ioe->ticket))) { 2701 kfree(table, M_TEMP); 2702 kfree(ioe, M_TEMP); 2703 goto fail; 2704 } 2705 break; 2706 #endif /* ALTQ */ 2707 case PF_RULESET_TABLE: 2708 bzero(table, sizeof(*table)); 2709 strlcpy(table->pfrt_anchor, ioe->anchor, 2710 sizeof(table->pfrt_anchor)); 2711 if ((error = pfr_ina_begin(table, 2712 &ioe->ticket, NULL, 0))) { 2713 kfree(table, M_TEMP); 2714 kfree(ioe, M_TEMP); 2715 goto fail; 2716 } 2717 break; 2718 default: 2719 if ((error = pf_begin_rules(&ioe->ticket, 2720 ioe->rs_num, ioe->anchor))) { 2721 kfree(table, M_TEMP); 2722 kfree(ioe, M_TEMP); 2723 goto fail; 2724 } 2725 break; 2726 } 2727 if 
(copyout(ioe, io->array+i, sizeof(io->array[i]))) { 2728 kfree(table, M_TEMP); 2729 kfree(ioe, M_TEMP); 2730 error = EFAULT; 2731 goto fail; 2732 } 2733 } 2734 kfree(table, M_TEMP); 2735 kfree(ioe, M_TEMP); 2736 break; 2737 } 2738 2739 case DIOCXROLLBACK: { 2740 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2741 struct pfioc_trans_e *ioe; 2742 struct pfr_table *table; 2743 int i; 2744 2745 if (io->esize != sizeof(*ioe)) { 2746 error = ENODEV; 2747 goto fail; 2748 } 2749 ioe = (struct pfioc_trans_e *)kmalloc(sizeof(*ioe), 2750 M_TEMP, M_WAITOK); 2751 table = (struct pfr_table *)kmalloc(sizeof(*table), 2752 M_TEMP, M_WAITOK); 2753 for (i = 0; i < io->size; i++) { 2754 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2755 kfree(table, M_TEMP); 2756 kfree(ioe, M_TEMP); 2757 error = EFAULT; 2758 goto fail; 2759 } 2760 switch (ioe->rs_num) { 2761 #ifdef ALTQ 2762 case PF_RULESET_ALTQ: 2763 if (ioe->anchor[0]) { 2764 kfree(table, M_TEMP); 2765 kfree(ioe, M_TEMP); 2766 error = EINVAL; 2767 goto fail; 2768 } 2769 if ((error = pf_rollback_altq(ioe->ticket))) { 2770 kfree(table, M_TEMP); 2771 kfree(ioe, M_TEMP); 2772 goto fail; /* really bad */ 2773 } 2774 break; 2775 #endif /* ALTQ */ 2776 case PF_RULESET_TABLE: 2777 bzero(table, sizeof(*table)); 2778 strlcpy(table->pfrt_anchor, ioe->anchor, 2779 sizeof(table->pfrt_anchor)); 2780 if ((error = pfr_ina_rollback(table, 2781 ioe->ticket, NULL, 0))) { 2782 kfree(table, M_TEMP); 2783 kfree(ioe, M_TEMP); 2784 goto fail; /* really bad */ 2785 } 2786 break; 2787 default: 2788 if ((error = pf_rollback_rules(ioe->ticket, 2789 ioe->rs_num, ioe->anchor))) { 2790 kfree(table, M_TEMP); 2791 kfree(ioe, M_TEMP); 2792 goto fail; /* really bad */ 2793 } 2794 break; 2795 } 2796 } 2797 kfree(table, M_TEMP); 2798 kfree(ioe, M_TEMP); 2799 break; 2800 } 2801 2802 case DIOCXCOMMIT: { 2803 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2804 struct pfioc_trans_e *ioe; 2805 struct pfr_table *table; 2806 struct pf_ruleset *rs; 2807 int i; 2808 
2809 if (io->esize != sizeof(*ioe)) { 2810 error = ENODEV; 2811 goto fail; 2812 } 2813 ioe = (struct pfioc_trans_e *)kmalloc(sizeof(*ioe), 2814 M_TEMP, M_WAITOK); 2815 table = (struct pfr_table *)kmalloc(sizeof(*table), 2816 M_TEMP, M_WAITOK); 2817 /* first makes sure everything will succeed */ 2818 for (i = 0; i < io->size; i++) { 2819 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2820 kfree(table, M_TEMP); 2821 kfree(ioe, M_TEMP); 2822 error = EFAULT; 2823 goto fail; 2824 } 2825 switch (ioe->rs_num) { 2826 #ifdef ALTQ 2827 case PF_RULESET_ALTQ: 2828 if (ioe->anchor[0]) { 2829 kfree(table, M_TEMP); 2830 kfree(ioe, M_TEMP); 2831 error = EINVAL; 2832 goto fail; 2833 } 2834 if (!altqs_inactive_open || ioe->ticket != 2835 ticket_altqs_inactive) { 2836 kfree(table, M_TEMP); 2837 kfree(ioe, M_TEMP); 2838 error = EBUSY; 2839 goto fail; 2840 } 2841 break; 2842 #endif /* ALTQ */ 2843 case PF_RULESET_TABLE: 2844 rs = pf_find_ruleset(ioe->anchor); 2845 if (rs == NULL || !rs->topen || ioe->ticket != 2846 rs->tticket) { 2847 kfree(table, M_TEMP); 2848 kfree(ioe, M_TEMP); 2849 error = EBUSY; 2850 goto fail; 2851 } 2852 break; 2853 default: 2854 if (ioe->rs_num < 0 || ioe->rs_num >= 2855 PF_RULESET_MAX) { 2856 kfree(table, M_TEMP); 2857 kfree(ioe, M_TEMP); 2858 error = EINVAL; 2859 goto fail; 2860 } 2861 rs = pf_find_ruleset(ioe->anchor); 2862 if (rs == NULL || 2863 !rs->rules[ioe->rs_num].inactive.open || 2864 rs->rules[ioe->rs_num].inactive.ticket != 2865 ioe->ticket) { 2866 kfree(table, M_TEMP); 2867 kfree(ioe, M_TEMP); 2868 error = EBUSY; 2869 goto fail; 2870 } 2871 break; 2872 } 2873 } 2874 /* now do the commit - no errors should happen here */ 2875 for (i = 0; i < io->size; i++) { 2876 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2877 kfree(table, M_TEMP); 2878 kfree(ioe, M_TEMP); 2879 error = EFAULT; 2880 goto fail; 2881 } 2882 switch (ioe->rs_num) { 2883 #ifdef ALTQ 2884 case PF_RULESET_ALTQ: 2885 if ((error = pf_commit_altq(ioe->ticket))) { 2886 kfree(table, M_TEMP); 
2887 kfree(ioe, M_TEMP); 2888 goto fail; /* really bad */ 2889 } 2890 break; 2891 #endif /* ALTQ */ 2892 case PF_RULESET_TABLE: 2893 bzero(table, sizeof(*table)); 2894 strlcpy(table->pfrt_anchor, ioe->anchor, 2895 sizeof(table->pfrt_anchor)); 2896 if ((error = pfr_ina_commit(table, ioe->ticket, 2897 NULL, NULL, 0))) { 2898 kfree(table, M_TEMP); 2899 kfree(ioe, M_TEMP); 2900 goto fail; /* really bad */ 2901 } 2902 break; 2903 default: 2904 if ((error = pf_commit_rules(ioe->ticket, 2905 ioe->rs_num, ioe->anchor))) { 2906 kfree(table, M_TEMP); 2907 kfree(ioe, M_TEMP); 2908 goto fail; /* really bad */ 2909 } 2910 break; 2911 } 2912 } 2913 kfree(table, M_TEMP); 2914 kfree(ioe, M_TEMP); 2915 break; 2916 } 2917 2918 case DIOCGETSRCNODES: { 2919 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 2920 struct pf_src_node *n, *p, *pstore; 2921 u_int32_t nr = 0; 2922 int space = psn->psn_len; 2923 2924 if (space == 0) { 2925 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2926 nr++; 2927 psn->psn_len = sizeof(struct pf_src_node) * nr; 2928 break; 2929 } 2930 2931 pstore = kmalloc(sizeof(*pstore), M_TEMP, M_WAITOK); 2932 2933 p = psn->psn_src_nodes; 2934 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2935 int secs = time_second, diff; 2936 2937 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 2938 break; 2939 2940 bcopy(n, pstore, sizeof(*pstore)); 2941 if (n->rule.ptr != NULL) 2942 pstore->rule.nr = n->rule.ptr->nr; 2943 pstore->creation = secs - pstore->creation; 2944 if (pstore->expire > secs) 2945 pstore->expire -= secs; 2946 else 2947 pstore->expire = 0; 2948 2949 /* adjust the connection rate estimate */ 2950 diff = secs - n->conn_rate.last; 2951 if (diff >= n->conn_rate.seconds) 2952 pstore->conn_rate.count = 0; 2953 else 2954 pstore->conn_rate.count -= 2955 n->conn_rate.count * diff / 2956 n->conn_rate.seconds; 2957 2958 error = copyout(pstore, p, sizeof(*p)); 2959 if (error) { 2960 kfree(pstore, M_TEMP); 2961 goto fail; 2962 } 2963 p++; 2964 nr++; 2965 } 
2966 psn->psn_len = sizeof(struct pf_src_node) * nr; 2967 2968 kfree(pstore, M_TEMP); 2969 break; 2970 } 2971 2972 case DIOCCLRSRCNODES: { 2973 struct pf_src_node *n; 2974 struct pf_state *state; 2975 2976 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 2977 state->src_node = NULL; 2978 state->nat_src_node = NULL; 2979 } 2980 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2981 n->expire = 1; 2982 n->states = 0; 2983 } 2984 pf_purge_expired_src_nodes(1); 2985 pf_status.src_nodes = 0; 2986 break; 2987 } 2988 2989 case DIOCKILLSRCNODES: { 2990 struct pf_src_node *sn; 2991 struct pf_state *s; 2992 struct pfioc_src_node_kill *psnk = \ 2993 (struct pfioc_src_node_kill *) addr; 2994 int killed = 0; 2995 2996 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 2997 if (PF_MATCHA(psnk->psnk_src.neg, \ 2998 &psnk->psnk_src.addr.v.a.addr, \ 2999 &psnk->psnk_src.addr.v.a.mask, \ 3000 &sn->addr, sn->af) && 3001 PF_MATCHA(psnk->psnk_dst.neg, \ 3002 &psnk->psnk_dst.addr.v.a.addr, \ 3003 &psnk->psnk_dst.addr.v.a.mask, \ 3004 &sn->raddr, sn->af)) { 3005 /* Handle state to src_node linkage */ 3006 if (sn->states != 0) { 3007 RB_FOREACH(s, pf_state_tree_id, 3008 &tree_id) { 3009 if (s->src_node == sn) 3010 s->src_node = NULL; 3011 if (s->nat_src_node == sn) 3012 s->nat_src_node = NULL; 3013 } 3014 sn->states = 0; 3015 } 3016 sn->expire = 1; 3017 killed++; 3018 } 3019 } 3020 3021 if (killed > 0) 3022 pf_purge_expired_src_nodes(1); 3023 3024 psnk->psnk_af = killed; 3025 break; 3026 } 3027 3028 case DIOCSETHOSTID: { 3029 u_int32_t *hostid = (u_int32_t *)addr; 3030 3031 if (*hostid == 0) 3032 pf_status.hostid = karc4random(); 3033 else 3034 pf_status.hostid = *hostid; 3035 break; 3036 } 3037 3038 case DIOCOSFPFLUSH: 3039 crit_enter(); 3040 pf_osfp_flush(); 3041 crit_exit(); 3042 break; 3043 3044 case DIOCIGETIFACES: { 3045 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3046 3047 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 3048 error = ENODEV; 3049 break; 3050 } 3051 error = 
pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 3052 &io->pfiio_size); 3053 break; 3054 } 3055 3056 case DIOCSETIFFLAG: { 3057 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3058 3059 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 3060 break; 3061 } 3062 3063 case DIOCCLRIFFLAG: { 3064 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3065 3066 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3067 break; 3068 } 3069 3070 default: 3071 error = ENODEV; 3072 break; 3073 } 3074 fail: 3075 lwkt_reltoken(&pf_token); 3076 return (error); 3077 } 3078 3079 /* 3080 * XXX - Check for version missmatch!!! 3081 */ 3082 static void 3083 pf_clear_states(void) 3084 { 3085 struct pf_state *state; 3086 3087 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3088 state->timeout = PFTM_PURGE; 3089 #if NPFSYNC 3090 /* don't send out individual delete messages */ 3091 state->sync_flags = PFSTATE_NOSYNC; 3092 #endif 3093 pf_unlink_state(state); 3094 } 3095 pf_status.states = 0; 3096 #if 0 /* NPFSYNC */ 3097 /* 3098 * XXX This is called on module unload, we do not want to sync that over? */ 3099 */ 3100 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 3101 #endif 3102 } 3103 3104 static int 3105 pf_clear_tables(void) 3106 { 3107 struct pfioc_table io; 3108 int error; 3109 3110 bzero(&io, sizeof(io)); 3111 3112 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 3113 io.pfrio_flags); 3114 3115 return (error); 3116 } 3117 3118 static void 3119 pf_clear_srcnodes(void) 3120 { 3121 struct pf_src_node *n; 3122 struct pf_state *state; 3123 3124 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 3125 state->src_node = NULL; 3126 state->nat_src_node = NULL; 3127 } 3128 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 3129 n->expire = 1; 3130 n->states = 0; 3131 } 3132 pf_purge_expired_src_nodes(0); 3133 pf_status.src_nodes = 0; 3134 } 3135 /* 3136 * XXX - Check for version missmatch!!! 
3137 */ 3138 3139 /* 3140 * Duplicate pfctl -Fa operation to get rid of as much as we can. 3141 */ 3142 static int 3143 shutdown_pf(void) 3144 { 3145 int error = 0; 3146 u_int32_t t[5]; 3147 char nn = '\0'; 3148 3149 pf_status.running = 0; 3150 do { 3151 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) != 0) { 3152 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 3153 break; 3154 } 3155 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) != 0) { 3156 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 3157 break; /* XXX: rollback? */ 3158 } 3159 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) != 0) { 3160 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 3161 break; /* XXX: rollback? */ 3162 } 3163 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 3164 != 0) { 3165 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 3166 break; /* XXX: rollback? */ 3167 } 3168 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 3169 != 0) { 3170 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 3171 break; /* XXX: rollback? 
*/ 3172 } 3173 3174 /* XXX: these should always succeed here */ 3175 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 3176 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 3177 pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 3178 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 3179 pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 3180 3181 if ((error = pf_clear_tables()) != 0) 3182 break; 3183 3184 #ifdef ALTQ 3185 if ((error = pf_begin_altq(&t[0])) != 0) { 3186 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 3187 break; 3188 } 3189 pf_commit_altq(t[0]); 3190 #endif 3191 3192 pf_clear_states(); 3193 3194 pf_clear_srcnodes(); 3195 3196 /* status does not use malloced mem so no need to cleanup */ 3197 /* fingerprints and interfaces have their own cleanup code */ 3198 } while(0); 3199 3200 return (error); 3201 } 3202 3203 static int 3204 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir) 3205 { 3206 /* 3207 * DragonFly's version of pf uses FreeBSD's native host byte ordering 3208 * for ip_len/ip_off. This is why we don't have to change byte order 3209 * like the FreeBSD-5 version does. 3210 */ 3211 int chk; 3212 3213 lwkt_gettoken(&pf_token); 3214 3215 chk = pf_test(PF_IN, ifp, m, NULL, NULL); 3216 if (chk && *m) { 3217 m_freem(*m); 3218 *m = NULL; 3219 } 3220 lwkt_reltoken(&pf_token); 3221 return chk; 3222 } 3223 3224 static int 3225 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir) 3226 { 3227 /* 3228 * DragonFly's version of pf uses FreeBSD's native host byte ordering 3229 * for ip_len/ip_off. This is why we don't have to change byte order 3230 * like the FreeBSD-5 version does. 3231 */ 3232 int chk; 3233 3234 lwkt_gettoken(&pf_token); 3235 3236 /* We need a proper CSUM befor we start (s. 
OpenBSD ip_output) */ 3237 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 3238 in_delayed_cksum(*m); 3239 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 3240 } 3241 chk = pf_test(PF_OUT, ifp, m, NULL, NULL); 3242 if (chk && *m) { 3243 m_freem(*m); 3244 *m = NULL; 3245 } 3246 lwkt_reltoken(&pf_token); 3247 return chk; 3248 } 3249 3250 #ifdef INET6 3251 static int 3252 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir) 3253 { 3254 /* 3255 * IPv6 is not affected by ip_len/ip_off byte order changes. 3256 */ 3257 int chk; 3258 3259 lwkt_gettoken(&pf_token); 3260 3261 chk = pf_test6(PF_IN, ifp, m, NULL, NULL); 3262 if (chk && *m) { 3263 m_freem(*m); 3264 *m = NULL; 3265 } 3266 lwkt_reltoken(&pf_token); 3267 return chk; 3268 } 3269 3270 static int 3271 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir) 3272 { 3273 /* 3274 * IPv6 is not affected by ip_len/ip_off byte order changes. 3275 */ 3276 int chk; 3277 3278 lwkt_gettoken(&pf_token); 3279 3280 /* We need a proper CSUM befor we start (s. 
OpenBSD ip_output) */ 3281 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 3282 in_delayed_cksum(*m); 3283 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 3284 } 3285 chk = pf_test6(PF_OUT, ifp, m, NULL, NULL); 3286 if (chk && *m) { 3287 m_freem(*m); 3288 *m = NULL; 3289 } 3290 lwkt_reltoken(&pf_token); 3291 return chk; 3292 } 3293 #endif /* INET6 */ 3294 3295 static int 3296 hook_pf(void) 3297 { 3298 struct pfil_head *pfh_inet; 3299 #ifdef INET6 3300 struct pfil_head *pfh_inet6; 3301 #endif 3302 3303 lwkt_gettoken(&pf_token); 3304 3305 if (pf_pfil_hooked) { 3306 lwkt_reltoken(&pf_token); 3307 return (0); 3308 } 3309 3310 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3311 if (pfh_inet == NULL) { 3312 lwkt_reltoken(&pf_token); 3313 return (ENODEV); 3314 } 3315 pfil_add_hook(pf_check_in, NULL, PFIL_IN, pfh_inet); 3316 pfil_add_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet); 3317 #ifdef INET6 3318 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3319 if (pfh_inet6 == NULL) { 3320 pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet); 3321 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet); 3322 lwkt_reltoken(&pf_token); 3323 return (ENODEV); 3324 } 3325 pfil_add_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6); 3326 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6); 3327 #endif 3328 3329 pf_pfil_hooked = 1; 3330 lwkt_reltoken(&pf_token); 3331 return (0); 3332 } 3333 3334 static int 3335 dehook_pf(void) 3336 { 3337 struct pfil_head *pfh_inet; 3338 #ifdef INET6 3339 struct pfil_head *pfh_inet6; 3340 #endif 3341 3342 lwkt_gettoken(&pf_token); 3343 3344 if (pf_pfil_hooked == 0) { 3345 lwkt_reltoken(&pf_token); 3346 return (0); 3347 } 3348 3349 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3350 if (pfh_inet == NULL) { 3351 lwkt_reltoken(&pf_token); 3352 return (ENODEV); 3353 } 3354 pfil_remove_hook(pf_check_in, NULL, PFIL_IN, pfh_inet); 3355 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT, pfh_inet); 3356 #ifdef INET6 3357 pfh_inet6 = 
pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3358 if (pfh_inet6 == NULL) { 3359 lwkt_reltoken(&pf_token); 3360 return (ENODEV); 3361 } 3362 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN, pfh_inet6); 3363 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT, pfh_inet6); 3364 #endif 3365 3366 pf_pfil_hooked = 0; 3367 lwkt_reltoken(&pf_token); 3368 return (0); 3369 } 3370 3371 static int 3372 pf_load(void) 3373 { 3374 int error; 3375 3376 lwkt_gettoken(&pf_token); 3377 3378 init_zone_var(); 3379 lockinit(&pf_mod_lck, "pf task lck", 0, LK_CANRECURSE); 3380 pf_dev = make_dev(&pf_ops, 0, 0, 0, 0600, PF_NAME); 3381 error = pfattach(); 3382 if (error) { 3383 dev_ops_remove_all(&pf_ops); 3384 lockuninit(&pf_mod_lck); 3385 lwkt_reltoken(&pf_token); 3386 return (error); 3387 } 3388 lockinit(&pf_consistency_lock, "pfconslck", 0, LK_CANRECURSE); 3389 lwkt_reltoken(&pf_token); 3390 return (0); 3391 } 3392 3393 static int 3394 pf_unload(void) 3395 { 3396 int error; 3397 pf_status.running = 0; 3398 3399 lwkt_gettoken(&pf_token); 3400 3401 error = dehook_pf(); 3402 if (error) { 3403 /* 3404 * Should not happen! 3405 * XXX Due to error code ESRCH, kldunload will show 3406 * a message like 'No such process'. 
3407 */ 3408 kprintf("pfil unregistration fail\n"); 3409 lwkt_reltoken(&pf_token); 3410 return error; 3411 } 3412 shutdown_pf(); 3413 pf_end_threads = 1; 3414 while (pf_end_threads < 2) { 3415 wakeup_one(pf_purge_thread); 3416 lksleep(pf_purge_thread, &pf_mod_lck, 0, "pftmo", hz); 3417 3418 } 3419 pfi_cleanup(); 3420 pf_osfp_flush(); 3421 pf_osfp_cleanup(); 3422 cleanup_pf_zone(); 3423 dev_ops_remove_all(&pf_ops); 3424 lockuninit(&pf_consistency_lock); 3425 lockuninit(&pf_mod_lck); 3426 lwkt_reltoken(&pf_token); 3427 return 0; 3428 } 3429 3430 static int 3431 pf_modevent(module_t mod, int type, void *data) 3432 { 3433 int error = 0; 3434 3435 lwkt_gettoken(&pf_token); 3436 3437 switch(type) { 3438 case MOD_LOAD: 3439 error = pf_load(); 3440 break; 3441 3442 case MOD_UNLOAD: 3443 error = pf_unload(); 3444 break; 3445 default: 3446 error = EINVAL; 3447 break; 3448 } 3449 lwkt_reltoken(&pf_token); 3450 return error; 3451 } 3452 3453 static moduledata_t pf_mod = { 3454 "pf", 3455 pf_modevent, 3456 0 3457 }; 3458 DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST); 3459 MODULE_VERSION(pf, PF_MODVER); 3460