/*	$OpenBSD: pf_ioctl.c,v 1.383 2022/07/20 09:33:11 mbuhl Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "pfsync.h"
#include "pflog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <uvm/uvm_extern.h>

#include <crypto/md5.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/hfsc.h>
#include <net/fq_codel.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#endif /* INET6 */

#include <net/pfvar.h>
#include <net/pfvar_priv.h>

#if NPFSYNC > 0
#include <netinet/ip_ipsp.h>
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

struct pool	 pf_tag_pl;

void		 pfattach(int);
void		 pf_thread_create(void *);
int		 pfopen(dev_t, int, int, struct proc *);
int		 pfclose(dev_t, int, int, struct proc *);
int		 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
int		 pf_begin_rules(u_int32_t *, const char *);
void		 pf_rollback_rules(u_int32_t, char *);
void		 pf_remove_queues(void);
int		 pf_commit_queues(void);
void		 pf_free_queues(struct pf_queuehead *);
void		 pf_calc_chksum(struct pf_ruleset *);
void		 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int		 pf_commit_rules(u_int32_t, char *);
int		 pf_addr_setup(struct pf_ruleset *,
		    struct pf_addr_wrap *, sa_family_t);
struct pfi_kif	*pf_kif_setup(struct pfi_kif *);
void		 pf_addr_copyout(struct pf_addr_wrap *);
void		 pf_trans_set_commit(void);
void		 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
int		 pf_validate_range(u_int8_t, u_int16_t[2], int);
int		 pf_rule_copyin(struct pf_rule *, struct pf_rule *);
int		 pf_rule_checkaf(struct pf_rule *);
u_int16_t	 pf_qname2qid(char *, int);
void		 pf_qid2qname(u_int16_t, char *);
void		 pf_qid_unref(u_int16_t);
int		 pf_states_clr(struct pfioc_state_kill *);
int		 pf_states_get(struct pfioc_states *);

struct pf_rule	 pf_default_rule, pf_default_rule_new;

struct {
	char		statusif[IFNAMSIZ];
	u_int32_t	debug;
	u_int32_t	hostid;
	u_int32_t	reass;
	u_int32_t	mask;
} pf_trans_set;

#define	PF_ORDER_HOST	0
#define	PF_ORDER_NET	1

#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

#define	TAGID_MAX	50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

/*
 * pf_lock protects consistency of PF data structures, which don't have
 * their dedicated lock yet. The pf_lock currently protects:
 *	- rules,
 *	- radix tables,
 *	- source nodes
 * All callers must grab pf_lock exclusively.
 *
 * pf_state_lock protects consistency of the state table. Packets that do
 * a state lookup grab the lock as readers. If a packet must create state,
 * it must grab the lock as a writer. Whenever a packet creates state, it
 * grabs pf_lock first and then locks pf_state_lock as the writer.
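 *
 * pfioctl_rw serializes pfioctl() callers: ioctls entered with FWRITE
 * take it exclusively, read-only ioctls take it shared.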
 */
struct rwlock	 pf_lock = RWLOCK_INITIALIZER("pf_lock");
struct rwlock	 pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
struct rwlock	 pfioctl_rw = RWLOCK_INITIALIZER("pfioctl_rw");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t	 tagname2tag(struct pf_tags *, char *, int);
void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
void		 tag_unref(struct pf_tags *, u_int16_t);
int		 pf_rtlabel_add(struct pf_addr_wrap *);
void		 pf_rtlabel_remove(struct pf_addr_wrap *);
void		 pf_rtlabel_copyout(struct pf_addr_wrap *);


void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
	    IPL_SOFTNET, 0, "pfrule", NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
	    IPL_SOFTNET, 0, "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
	    IPL_SOFTNET, 0, "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
	    IPL_SOFTNET, 0, "pfstate", NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
	    IPL_SOFTNET, 0, "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
	    IPL_SOFTNET, 0, "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
	    IPL_SOFTNET, 0, "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
	    IPL_SOFTNET, 0, "pfqueue", NULL);
	pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
	    IPL_SOFTNET, 0, "pftag", NULL);
	pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
	    IPL_SOFTNET, 0, "pfpktdelay", NULL);
	pool_init(&pf_anchor_pl, sizeof(struct pf_anchor), 0,
	    IPL_SOFTNET, 0, "pfanchor", NULL);

	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();
	pf_syncookies_init();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_ANCHORS].pp,
	    pf_pool_limits[PF_LIMIT_ANCHORS].limit, NULL, 0);

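	/* with 100MB of physical memory or less, use a smaller table limit */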
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
	pf_default_rule.nat.addr.type = PF_ADDR_NONE;
	pf_default_rule.route.addr.type = PF_ADDR_NONE;

	pf_normalize_init();
	memset(&pf_status, 0, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	pf_default_rule_new = pf_default_rule;
}

int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

void
pf_rule_free(struct pf_rule *rule)
{
	if (rule == NULL)
		return;

	pfi_kif_free(rule->kif);
	pfi_kif_free(rule->rcv_kif);
	pfi_kif_free(rule->rdr.kif);
	pfi_kif_free(rule->nat.kif);
	pfi_kif_free(rule->route.kif);

	pool_put(&pf_rule_pl, rule);
}

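/*
 * Unlink a rule from its queue; the rule itself is freed only once no
 * more states or source nodes refer to it.
 */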
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_remove_anchor(rule);
	pool_put(&pf_rule_pl, rule);
}

void
pf_purge_rule(struct pf_rule *rule)
{
	u_int32_t		 nr = 0;
	struct pf_ruleset	*ruleset;

	KASSERT((rule != NULL) && (rule->ruleset != NULL));
	ruleset = rule->ruleset;

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);
	pf_remove_if_empty_ruleset(ruleset);

	if (ruleset == &pf_main_ruleset)
		pf_calc_chksum(ruleset);
}

u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
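	/*
	 * The lists stay sorted by tag id; the first entry whose id does
	 * not match the running counter marks a free slot.
	 */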
	TAILQ_FOREACH(p, head, entries) {
		if (p->tag != new_tagid)
			break;
		new_tagid = p->tag + 1;
	}

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	TAILQ_FOREACH_SAFE(p, head, entries, next) {
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				pool_put(&pf_tag_pl, p);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
}

u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}

void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}

void
pf_qid_unref(u_int16_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

int
pf_begin_rules(u_int32_t *ticket, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	*ticket = ++rs->rules.inactive.ticket;
	rs->rules.inactive.open = 1;
	return (0);
}

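/*
 * Discard an open inactive ruleset. A ticket mismatch means the
 * transaction was already committed or rolled back.
 */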
void
pf_rollback_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    rs->rules.inactive.ticket != ticket)
		return;
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	rs->rules.inactive.open = 0;

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return;

	pf_free_queues(pf_queues_inactive);
}

void
pf_free_queues(struct pf_queuehead *where)
{
	struct pf_queuespec	*q, *qtmp;

	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
		TAILQ_REMOVE(where, q, entries);
		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
		pool_put(&pf_queue_pl, q);
	}
}

void
pf_remove_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;

	/* put back interfaces in normal queueing mode */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}
}

struct pf_queue_if {
	struct ifnet		*ifp;
	const struct ifq_ops	*ifqops;
	const struct pfq_ops	*pfqops;
	void			*disc;
	struct pf_queue_if	*next;
};

static inline struct pf_queue_if *
pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
{
	struct pf_queue_if *qif = list;

	while (qif != NULL) {
		if (qif->ifp == ifp)
			return (qif);

		qif = qif->next;
	}

	return (qif);
}

int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_queue_if	*list = NULL, *qif;
	int			 error;

	/*
	 * Find root queues and allocate traffic conditioner
	 * private data for these interfaces
	 */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
		qif->ifp = ifp;

		if (q->flags & PFQS_ROOTCLASS) {
			qif->ifqops = ifq_hfsc_ops;
			qif->pfqops = pfq_hfsc_ops;
		} else {
			qif->ifqops = ifq_fqcodel_ops;
			qif->pfqops = pfq_fqcodel_ops;
		}

		qif->disc = qif->pfqops->pfq_alloc(ifp);

		qif->next = list;
		list = qif;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = pf_ifp2q(list, ifp);
		KASSERT(qif != NULL);

		error = qif->pfqops->pfq_addqueue(qif->disc, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = pf_ifp2q(list, ifp);
		if (qif != NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		ifp = qif->ifp;

		ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (0);

error:
	while (list != NULL) {
		qif = list;
		list = qif->next;

		qif->pfqops->pfq_free(qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (error);
}

int
pf_commit_queues(void)
{
	struct pf_queuehead	*qswap;
	int error;

	/* swap */
	qswap = pf_queues_active;
	pf_queues_active = pf_queues_inactive;
	pf_queues_inactive = qswap;

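	/*
	 * Build the queues for the swapped-in configuration; on failure
	 * swap back so the old configuration stays in place.
	 */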
	error = pf_create_queues();
	if (error != 0) {
		pf_queues_inactive = pf_queues_active;
		pf_queues_active = qswap;
		return (error);
	}

	pf_free_queues(pf_queues_inactive);

	return (0);
}

const struct pfq_ops *
pf_queue_manager(struct pf_queuespec *q)
{
	if (q->flags & PFQS_FLOWQUEUE)
		return pfq_fqcodel_ops;
	return (/* pfq_default_ops */ NULL);
}

#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));	\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));	\
} while (0)

void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
		    strlen(PF_OPTIMIZER_TABLE_PFX)))
			PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	 x;
	u_int32_t	 y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

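/*
 * Swap the inactive ruleset in for the active one; the caller's ticket
 * must match the one handed out by pf_begin_rules().
 */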
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	u_int32_t		 old_rcount;

	/* Make sure any expired rules get removed from active rules first. */
	pf_purge_expired_rules();

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	if (rs == &pf_main_ruleset)
		pf_calc_chksum(rs);

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}

void
pf_calc_chksum(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);

	if (rs->rules.inactive.rcount) {
		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
			pf_hash_rule(&ctx, rule);
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
}

int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	if (pfi_dynaddr_setup(addr, af, PR_WAITOK) ||
	    pf_tbladdr_setup(ruleset, addr, PR_WAITOK) ||
	    pf_rtlabel_add(addr))
		return (EINVAL);

	return (0);
}

struct pfi_kif *
pf_kif_setup(struct pfi_kif *kif_buf)
{
	struct pfi_kif *kif;

	if (kif_buf == NULL)
		return (NULL);

	KASSERT(kif_buf->pfik_name[0] != '\0');

	kif = pfi_kif_get(kif_buf->pfik_name, &kif_buf);
	if (kif_buf != NULL)
		pfi_kif_free(kif_buf);
	pfi_kif_ref(kif, PFI_KIF_REF_RULE);

	return (kif);
}

void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}

int
pf_states_clr(struct pfioc_state_kill *psk)
{
	struct pf_state		*s, *nexts;
	struct pf_state		*head, *tail;
	u_int			 killed = 0;
	int			 error;

	NET_LOCK();

	/* lock against the gc removing an item from the list */
	error = rw_enter(&pf_state_list.pfs_rwl, RW_READ|RW_INTR);
	if (error != 0)
		goto unlock;

	/* get a snapshot view of the ends of the list to traverse between */
	mtx_enter(&pf_state_list.pfs_mtx);
	head = TAILQ_FIRST(&pf_state_list.pfs_list);
	tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue);
	mtx_leave(&pf_state_list.pfs_mtx);

	s = NULL;
	nexts = head;

	PF_LOCK();
	PF_STATE_ENTER_WRITE();

	while (s != tail) {
		s = nexts;
		nexts = TAILQ_NEXT(s, entry_list);

		if (s->timeout == PFTM_UNLINKED)
			continue;

		if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
		    s->kif->pfik_name)) {
#if NPFSYNC > 0
			/* don't send out individual delete messages */
			SET(s->state_flags, PFSTATE_NOSYNC);
#endif	/* NPFSYNC > 0 */
			pf_remove_state(s);
			killed++;
		}
	}

	PF_STATE_EXIT_WRITE();
#if NPFSYNC > 0
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif	/* NPFSYNC > 0 */
	PF_UNLOCK();
	rw_exit(&pf_state_list.pfs_rwl);

	psk->psk_killed = killed;
unlock:
	NET_UNLOCK();

	return (error);
}

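/*
 * Copy states out to userland; a request with ps_len == 0 only reports
 * the buffer size a full dump would need.
 */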
int
pf_states_get(struct pfioc_states *ps)
{
	struct pf_state		*head, *tail;
	struct pf_state		*next, *state;
	struct pfsync_state	*p, pstore;
	u_int32_t		 nr = 0;
	int			 error;

	if (ps->ps_len == 0) {
		nr = pf_status.states;
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		return (0);
	}

	p = ps->ps_states;

	/* lock against the gc removing an item from the list */
	error = rw_enter(&pf_state_list.pfs_rwl, RW_READ|RW_INTR);
	if (error != 0)
		return (error);

	/* get a snapshot view of the ends of the list to traverse between */
	mtx_enter(&pf_state_list.pfs_mtx);
	head = TAILQ_FIRST(&pf_state_list.pfs_list);
	tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue);
	mtx_leave(&pf_state_list.pfs_mtx);

	state = NULL;
	next = head;

	while (state != tail) {
		state = next;
		next = TAILQ_NEXT(state, entry_list);

		if (state->timeout == PFTM_UNLINKED)
			continue;

		if ((nr+1) * sizeof(*p) > ps->ps_len)
			break;

		pf_state_export(&pstore, state);
		error = copyout(&pstore, p, sizeof(*p));
		if (error)
			goto fail;

		p++;
		nr++;
	}
	ps->ps_len = sizeof(struct pfsync_state) * nr;

fail:
	rw_exit(&pf_state_list.pfs_rwl);

	return (error);
}

int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel > 1)
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCGETQUEUES:
		case DIOCGETQUEUE:
		case DIOCGETQSTATS:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
		case DIOCGETSYNFLWATS:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

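	/* without FWRITE, allow only read-only and dummy operations */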
	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCGETQUEUES:
		case DIOCGETQUEUE:
		case DIOCGETQSTATS:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGETSYNFLWATS:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		rw_enter_write(&pfioctl_rw);
	else
		rw_enter_read(&pfioctl_rw);

	switch (cmd) {

	case DIOCSTART:
		NET_LOCK();
		PF_LOCK();
		if (pf_status.running)
			error = EEXIST;
		else {
			pf_status.running = 1;
			pf_status.since = getuptime();
			if (pf_status.stateid == 0) {
				pf_status.stateid = gettime();
				pf_status.stateid = pf_status.stateid << 32;
			}
			timeout_add_sec(&pf_purge_to, 1);
			pf_create_queues();
			DPFPRINTF(LOG_NOTICE, "pf: started");
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;

	case DIOCSTOP:
		NET_LOCK();
		PF_LOCK();
		if (!pf_status.running)
			error = ENOENT;
		else {
			pf_status.running = 0;
			pf_status.since = getuptime();
			pf_remove_queues();
			DPFPRINTF(LOG_NOTICE, "pf: stopped");
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;

	case DIOCGETQUEUES: {
		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		pq->ticket = pf_main_ruleset.rules.active.ticket;

		/* save state to not run over them all each time? */
		qs = TAILQ_FIRST(pf_queues_active);
		while (qs != NULL) {
			qs = TAILQ_NEXT(qs, entries);
			nr++;
		}
		pq->nr = nr;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETQUEUE: {
		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		/* save state to not run over them all each time? */
		qs = TAILQ_FIRST(pf_queues_active);
		while ((qs != NULL) && (nr++ < pq->nr))
			qs = TAILQ_NEXT(qs, entries);
		if (qs == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		memcpy(&pq->queue, qs, sizeof(pq->queue));
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr;
		int			 nbytes;

		NET_LOCK();
		PF_LOCK();
		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		nbytes = pq->nbytes;
		nr = 0;

		/* save state to not run over them all each time? */
		qs = TAILQ_FIRST(pf_queues_active);
		while ((qs != NULL) && (nr++ < pq->nr))
			qs = TAILQ_NEXT(qs, entries);
		if (qs == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		memcpy(&pq->queue, qs, sizeof(pq->queue));
		/* It's a root flow queue but is not an HFSC root class */
		if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 &&
		    !(qs->flags & PFQS_ROOTCLASS))
			error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		else
			error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		if (error == 0)
			pq->nbytes = nbytes;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCADDQUEUE: {
		struct pfioc_queue	*q = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;

		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (qs == NULL) {
			error = ENOMEM;
			goto fail;
		}

		NET_LOCK();
		PF_LOCK();
		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			goto fail;
		}
		memcpy(qs, &q->queue, sizeof(*qs));
		qs->qid = pf_qname2qid(qs->qname, 1);
		if (qs->qid == 0) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			goto fail;
		}
		if (qs->parent[0] && (qs->parent_qid =
		    pf_qname2qid(qs->parent, 0)) == 0) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			goto fail;
		}
		qs->kif = pfi_kif_get(qs->ifname, NULL);
		if (qs->kif == NULL) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			goto fail;
		}
		/* XXX resolve bw percentage specs */
		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);

		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
		PF_UNLOCK();
		NET_UNLOCK();

		break;
	}

	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;

		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (rule == NULL) {
			error = ENOMEM;
			goto fail;
		}

		if ((error = pf_rule_copyin(&pr->rule, rule))) {
			pf_rule_free(rule);
			rule = NULL;
			goto fail;
		}

		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			pf_rule_free(rule);
			rule = NULL;
			goto fail;
		}
		if ((error = pf_rule_checkaf(rule))) {
			pf_rule_free(rule);
			rule = NULL;
			goto fail;
		}
		if (rule->src.addr.type == PF_ADDR_NONE ||
		    rule->dst.addr.type == PF_ADDR_NONE) {
			error = EINVAL;
			pf_rule_free(rule);
			rule = NULL;
			goto fail;
		}

		if (rule->rt && !rule->direction) {
			error = EINVAL;
			pf_rule_free(rule);
			rule = NULL;
			goto fail;
		}

		NET_LOCK();
		PF_LOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pf_rule_free(rule);
			goto fail;
		}
		if (pr->ticket != ruleset->rules.inactive.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pf_rule_free(rule);
			goto fail;
		}
		rule->cuid = p->p_ucred->cr_ruid;
		rule->cpid = p->p_p->ps_pid;

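		/* give the new rule the next number after the inactive tail */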
		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

		rule->kif = pf_kif_setup(rule->kif);
		rule->rcv_kif = pf_kif_setup(rule->rcv_kif);
		rule->rdr.kif = pf_kif_setup(rule->rdr.kif);
		rule->nat.kif = pf_kif_setup(rule->nat.kif);
		rule->route.kif = pf_kif_setup(rule->route.kif);

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname, PR_WAITOK)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
		    rule, entries);
		rule->ruleset = ruleset;
		ruleset->rules.inactive.rcount++;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;

		NET_LOCK();
		PF_LOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules.active.ticket;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 i;

		NET_LOCK();
		PF_LOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		if (pr->ticket != ruleset->rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		memcpy(&pr->rule, rule, sizeof(struct pf_rule));
		memset(&pr->rule.entries, 0, sizeof(pr->rule.entries));
		pr->rule.kif = NULL;
		pr->rule.nat.kif = NULL;
		pr->rule.rdr.kif = NULL;
		pr->rule.route.kif = NULL;
		pr->rule.rcv_kif = NULL;
		pr->rule.anchor = NULL;
		pr->rule.overload_tbl = NULL;
		pr->rule.pktrate.limit /= PF_THRESHOLD_MULT;
		memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle));
		pr->rule.ruleset = NULL;
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		pf_addr_copyout(&pr->rule.rdr.addr);
		pf_addr_copyout(&pr->rule.nat.addr);
		pf_addr_copyout(&pr->rule.route.addr);
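		/* skip steps are kept as pointers, export them as rule numbers */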
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = (u_int32_t)-1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			rule->states_tot = 0;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			goto fail;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			NET_LOCK();
			PF_LOCK();

			ruleset = pf_find_ruleset(pcr->anchor);
			if (ruleset == NULL)
				error = EINVAL;
			else
				pcr->ticket = ++ruleset->rules.active.ticket;

			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl,
			    PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
			if (newrule == NULL) {
				error = ENOMEM;
				goto fail;
			}

			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				pool_put(&pf_rule_pl, newrule);
				goto fail;
			}
			error = pf_rule_copyin(&pcr->rule, newrule);
			if (error != 0) {
				pf_rule_free(newrule);
				newrule = NULL;
				goto fail;
			}
			if ((error = pf_rule_checkaf(newrule))) {
				pf_rule_free(newrule);
				newrule = NULL;
				goto fail;
			}
			if (newrule->rt && !newrule->direction) {
				pf_rule_free(newrule);
				error = EINVAL;
				newrule = NULL;
				goto fail;
			}
		}

		NET_LOCK();
		PF_LOCK();
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pf_rule_free(newrule);
			goto fail;
		}

		if (pcr->ticket != ruleset->rules.active.ticket) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pf_rule_free(newrule);
			goto fail;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			KASSERT(newrule != NULL);
			newrule->cuid = p->p_ucred->cr_ruid;
			newrule->cpid = p->p_p->ps_pid;

			newrule->kif = pf_kif_setup(newrule->kif);
			newrule->rcv_kif = pf_kif_setup(newrule->rcv_kif);
			newrule->rdr.kif = pf_kif_setup(newrule->rdr.kif);
			newrule->nat.kif = pf_kif_setup(newrule->nat.kif);
			newrule->route.kif = pf_kif_setup(newrule->route.kif);

			if (newrule->overload_tblname[0]) {
				newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname,
				    PR_WAITOK);
				if (newrule->overload_tbl == NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			if (pf_addr_setup(ruleset, &newrule->src.addr,
			    newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->dst.addr,
			    newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->rdr.addr,
			    newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->nat.addr,
			    newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->route.addr,
			    newrule->af))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;

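			/* if any part of the setup failed, undo the partial rule */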
			if (error) {
				pf_rm_rule(NULL, newrule);
				PF_UNLOCK();
				NET_UNLOCK();
				goto fail;
			}
		}

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
			    pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				PF_UNLOCK();
				NET_UNLOCK();
				goto fail;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
			ruleset->rules.active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules.active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules.active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules.active.rcount++;
			newrule->ruleset = ruleset;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules.active.ticket++;

		pf_calc_skip_steps(ruleset->rules.active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCCLRSTATES:
		error = pf_states_clr((struct pfioc_state_kill *)addr);
		break;

	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_item	*si, *sit;
		struct pf_state_key	*sk, key;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;
		const int		 dirs[] = { PF_IN, PF_OUT };
		int			 sidx, didx;

		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = pf_status.hostid;
			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
				pf_remove_state(s);
				psk->psk_killed = 1;
			}
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		if (psk->psk_af && psk->psk_proto &&
		    psk->psk_src.port_op == PF_OP_EQ &&
		    psk->psk_dst.port_op == PF_OP_EQ) {

			key.af = psk->psk_af;
			key.proto = psk->psk_proto;
			key.rdomain = psk->psk_rdomain;

			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			for (i = 0; i < nitems(dirs); i++) {
				if (dirs[i] == PF_IN) {
					sidx = 0;
					didx = 1;
				} else {
					sidx = 1;
					didx = 0;
				}
				pf_addrcpy(&key.addr[sidx],
				    &psk->psk_src.addr.v.a.addr, key.af);
				pf_addrcpy(&key.addr[didx],
				    &psk->psk_dst.addr.v.a.addr, key.af);
				key.port[sidx] = psk->psk_src.port[0];
				key.port[didx] = psk->psk_dst.port[0];

				sk = RB_FIND(pf_state_tree, &pf_statetbl, &key);
				if (sk == NULL)
					continue;

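				/*
				 * For af translated states the wire and
				 * stack keys differ in address family, so
				 * match inbound states on either key.
				 */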
				TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit)
					if (((si->s->key[PF_SK_WIRE]->af ==
					    si->s->key[PF_SK_STACK]->af &&
					    sk == (dirs[i] == PF_IN ?
					    si->s->key[PF_SK_WIRE] :
					    si->s->key[PF_SK_STACK])) ||
					    (si->s->key[PF_SK_WIRE]->af !=
					    si->s->key[PF_SK_STACK]->af &&
					    dirs[i] == PF_IN &&
					    (sk == si->s->key[PF_SK_STACK] ||
					    sk == si->s->key[PF_SK_WIRE]))) &&
					    (!psk->psk_ifname[0] ||
					    (si->s->kif != pfi_all &&
					    !strcmp(psk->psk_ifname,
					    si->s->kif->pfik_name)))) {
						pf_remove_state(si->s);
						killed++;
					}
			}
			if (killed)
				psk->psk_killed = killed;
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (s->direction == PF_OUT) {
				sk = s->key[PF_SK_STACK];
				srcaddr = &sk->addr[1];
				dstaddr = &sk->addr[0];
				srcport = sk->port[1];
				dstport = sk->port[0];
			} else {
				sk = s->key[PF_SK_WIRE];
				srcaddr = &sk->addr[0];
				dstaddr = &sk->addr[1];
				srcport = sk->port[0];
				dstport = sk->port[1];
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
			    pf_match_addr(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    srcaddr, sk->af) &&
			    pf_match_addr(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    dstaddr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    srcport)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dstport)) &&
			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
				pf_remove_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
		PF_STATE_EXIT_WRITE();
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

#if NPFSYNC > 0
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
#endif	/* NPFSYNC > 0 */

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		memset(&id_key, 0, sizeof(id_key));
		id_key.id = ps->state.id;
		id_key.creatorid = ps->state.creatorid;

		NET_LOCK();
		PF_STATE_ENTER_READ();
		s = pf_find_state_byid(&id_key);
		s = pf_state_ref(s);
		PF_STATE_EXIT_READ();
		NET_UNLOCK();
		if (s == NULL) {
			error = ENOENT;
			goto fail;
		}

		pf_state_export(&ps->state, s);
		pf_state_unref(s);
		break;
	}

	case DIOCGETSTATES:
		error = pf_states_get((struct pfioc_states *)addr);
		break;

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		NET_LOCK();
		PF_LOCK();
		memcpy(s, &pf_status, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

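	/*
	 * The status interface is staged in pf_trans_set and applied
	 * when the transaction is committed.
	 */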
	case DIOCSETSTATUSIF: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		if (pi->pfiio_name[0] == 0) {
			memset(pf_status.ifname, 0, IFNAMSIZ);
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
		pf_trans_set.mask |= PF_TSET_STATUSIF;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCCLRSTATUS: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		/* if ifname is specified, clear counters there only */
		if (pi->pfiio_name[0]) {
			pfi_update_status(pi->pfiio_name, NULL);
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		memset(pf_status.counters, 0, sizeof(pf_status.counters));
		memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
		memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
		pf_status.since = getuptime();

		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		switch (pnl->af) {
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			error = EAFNOSUPPORT;
			goto fail;
		}

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)) ||
		    pnl->rdomain > RT_TABLEID_MAX)
			error = EINVAL;
		else {
			key.af = pnl->af;
			key.proto = pnl->proto;
			key.rdomain = pnl->rdomain;
			pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			NET_LOCK();
			PF_STATE_ENTER_READ();
			state = pf_find_state_all(&key, direction, &m);
			state = pf_state_ref(state);
			PF_STATE_EXIT_READ();
			NET_UNLOCK();

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->key[sidx];
				pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx],
				    sk->af);
				pnl->rsport = sk->port[sidx];
				pf_addrcpy(&pnl->rdaddr, &sk->addr[didx],
				    sk->af);
				pnl->rdport = sk->port[didx];
				pnl->rrdomain = sk->rdomain;
			} else
				error = ENOENT;
			pf_state_unref(state);
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

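	/* new limits are staged in limit_new and applied on commit */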
	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		NET_LOCK();
		PF_LOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
		    pl->limit) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* Fragments reference mbuf clusters. */
		if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		pf_pool_limits[pl->index].limit_new = pl->limit;
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		NET_LOCK();
		PF_LOCK();
		pf_trans_set.debug = *level;
		pf_trans_set.mask |= PF_TSET_DEBUG;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		NET_LOCK();
		PF_LOCK();
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		pr->nr = 0;
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		pr->name[0] = '\0';
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		PF_UNLOCK();
		NET_UNLOCK();
		if (!pr->name[0])
			error = EBUSY;
		break;
	}

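	/*
	 * Table ioctls are handed to pfr_*(); PFR_FLAG_USERIOCTL marks
	 * the request as coming from userland.
	 */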

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}
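
	/*
	 * Note: unlike their neighbours, DIOCRADDTABLES above and
	 * DIOCRADDADDRS below call pfr_add_tables()/pfr_add_addrs()
	 * without taking NET_LOCK()/PF_LOCK() in this handler; those
	 * paths are expected to take the locks they need internally.
	 */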

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}
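
	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement an all-or-
	 * nothing transaction over a set of rulesets and tables.  A
	 * userland sketch (illustrative only; "dev" and the rule/table
	 * loading step are hypothetical, see struct pfioc_trans in
	 * pfvar.h):
	 *
	 *	struct pfioc_trans_e ioe[2];
	 *	struct pfioc_trans io;
	 *
	 *	memset(&io, 0, sizeof(io));
	 *	memset(ioe, 0, sizeof(ioe));
	 *	ioe[0].type = PF_TRANS_RULESET;
	 *	ioe[1].type = PF_TRANS_TABLE;
	 *	io.size = 2;
	 *	io.esize = sizeof(ioe[0]);
	 *	io.array = ioe;
	 *	if (ioctl(dev, DIOCXBEGIN, &io) == -1)
	 *		err(1, "DIOCXBEGIN");
	 *	(load rules and tables against the tickets in ioe[])
	 *	if (ioctl(dev, DIOCXCOMMIT, &io) == -1)
	 *		err(1, "DIOCXCOMMIT");
	 *
	 * DIOCXBEGIN hands back one ticket per element; the same array
	 * must be passed to DIOCXCOMMIT (or DIOCXROLLBACK) unchanged.
	 */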

	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		NET_LOCK();
		PF_LOCK();
		pf_default_rule_new = pf_default_rule;
		PF_UNLOCK();
		NET_UNLOCK();
		memset(&pf_trans_set, 0, sizeof(pf_trans_set));
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				goto fail;
			}
			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
			    sizeof(ioe->anchor)) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = ENAMETOOLONG;
				goto fail;
			}
			NET_LOCK();
			PF_LOCK();
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				memset(table, 0, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					PF_UNLOCK();
					NET_UNLOCK();
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail;
				}
				break;
			case PF_TRANS_RULESET:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->anchor))) {
					PF_UNLOCK();
					NET_UNLOCK();
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail;
				}
				break;
			default:
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EINVAL;
				goto fail;
			}
			PF_UNLOCK();
			NET_UNLOCK();
			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				goto fail;
			}
		}
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				goto fail;
			}
			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
			    sizeof(ioe->anchor)) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = ENAMETOOLONG;
				goto fail;
			}
			NET_LOCK();
			PF_LOCK();
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				memset(table, 0, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					PF_UNLOCK();
					NET_UNLOCK();
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail; /* really bad */
				}
				break;
			case PF_TRANS_RULESET:
				pf_rollback_rules(ioe->ticket, ioe->anchor);
				break;
			default:
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EINVAL;
				goto fail; /* really bad */
			}
			PF_UNLOCK();
			NET_UNLOCK();
		}
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		break;
	}
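
	/*
	 * The "really bad" comments in the rollback path above flag
	 * failures that leave the transaction half-undone: once part of
	 * it has been rolled back there is no way to restore the
	 * inactive rulesets that were already discarded, so the handler
	 * can only report the error and bail out.
	 */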

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		NET_LOCK();
		PF_LOCK();
		/* First pass: make sure everything will succeed. */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				goto fail;
			}
			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
			    sizeof(ioe->anchor)) {
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = ENAMETOOLONG;
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_UNLOCK();
					NET_UNLOCK();
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					error = EBUSY;
					goto fail;
				}
				break;
			case PF_TRANS_RULESET:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules.inactive.open ||
				    rs->rules.inactive.ticket !=
				    ioe->ticket) {
					PF_UNLOCK();
					NET_UNLOCK();
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EINVAL;
				goto fail;
			}
		}
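
		/*
		 * The transaction array is copied in a second time in the
		 * commit pass below; if another userland thread modifies
		 * it in between, the per-element tickets still have to
		 * match the open transaction, so a stale or tampered
		 * element fails its commit instead of corrupting state.
		 */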

		/*
		 * Checked already in DIOCSETLIMIT, but check again as the
		 * situation might have changed.
		 */
		for (i = 0; i < PF_LIMIT_MAX; i++) {
			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
			    pf_pool_limits[i].limit_new) {
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EBUSY;
				goto fail;
			}
		}
		/* Now do the commit; no errors should happen here. */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				goto fail;
			}
			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
			    sizeof(ioe->anchor)) {
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = ENAMETOOLONG;
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				memset(table, 0, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					PF_UNLOCK();
					NET_UNLOCK();
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail; /* really bad */
				}
				break;
			case PF_TRANS_RULESET:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->anchor))) {
					PF_UNLOCK();
					NET_UNLOCK();
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail; /* really bad */
				}
				break;
			default:
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EINVAL;
				goto fail; /* really bad */
			}
		}
		for (i = 0; i < PF_LIMIT_MAX; i++) {
			if (pf_pool_limits[i].limit_new !=
			    pf_pool_limits[i].limit &&
			    pool_sethardlimit(pf_pool_limits[i].pp,
			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
				PF_UNLOCK();
				NET_UNLOCK();
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EBUSY;
				goto fail; /* really bad */
			}
			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
		}
		for (i = 0; i < PFTM_MAX; i++) {
			int old = pf_default_rule.timeout[i];

			pf_default_rule.timeout[i] =
			    pf_default_rule_new.timeout[i];
			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
			    pf_default_rule.timeout[i] < old)
				task_add(net_tq(0), &pf_purge_task);
		}
		pfi_xcommit();
		pf_trans_set_commit();
		PF_UNLOCK();
		NET_UNLOCK();
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *p, *pstore;
		u_int32_t nr = 0;
		size_t space = psn->psn_len;

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		NET_LOCK();
		PF_LOCK();
		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			PF_UNLOCK();
			NET_UNLOCK();
			free(pstore, M_TEMP, sizeof(*pstore));
			goto fail;
		}

		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int secs = getuptime(), diff;

			if ((nr + 1) * sizeof(*p) > psn->psn_len)
				break;

			memcpy(pstore, n, sizeof(*pstore));
			memset(&pstore->entry, 0, sizeof(pstore->entry));
			pstore->rule.ptr = NULL;
			pstore->kif = NULL;
			pstore->rule.nr = n->rule.ptr->nr;
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				PF_UNLOCK();
				NET_UNLOCK();
				free(pstore, M_TEMP, sizeof(*pstore));
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		PF_UNLOCK();
		NET_UNLOCK();
		free(pstore, M_TEMP, sizeof(*pstore));
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		RB_FOREACH(state, pf_state_tree_id, &tree_id)
			pf_src_tree_remove_state(state);
		PF_STATE_EXIT_WRITE();
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			n->expire = 1;
		pf_purge_expired_src_nodes();
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
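
	/*
	 * Source nodes are not freed directly: both DIOCCLRSRCNODES
	 * above and DIOCKILLSRCNODES below mark candidates with
	 * expire = 1 and let pf_purge_expired_src_nodes() reap them,
	 * after first detaching any states that still reference them.
	 */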

	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		u_int killed = 0;

		NET_LOCK();
		PF_LOCK();
		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (pf_match_addr(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    pf_match_addr(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					PF_ASSERT_LOCKED();
					PF_STATE_ENTER_WRITE();
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id)
						pf_state_rm_src_node(s, sn);
					PF_STATE_EXIT_WRITE();
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		psnk->psnk_killed = killed;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		NET_LOCK();
		PF_LOCK();
		if (*hostid == 0)
			pf_trans_set.hostid = arc4random();
		else
			pf_trans_set.hostid = *hostid;
		pf_trans_set.mask |= PF_TSET_HOSTID;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *kif_buf;
		int apfiio_size = io->pfiio_size;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			goto fail;
		}

		if ((kif_buf = mallocarray(sizeof(*kif_buf), apfiio_size,
		    M_TEMP, M_WAITOK|M_CANFAIL)) == NULL) {
			error = EINVAL;
			goto fail;
		}

		NET_LOCK();
		PF_LOCK();
		pfi_get_ifaces(io->pfiio_name, kif_buf, &io->pfiio_size);
		PF_UNLOCK();
		NET_UNLOCK();
		if (copyout(kif_buf, io->pfiio_buffer, sizeof(*kif_buf) *
		    io->pfiio_size))
			error = EFAULT;
		free(kif_buf, M_TEMP, sizeof(*kif_buf) * apfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io == NULL) {
			error = EINVAL;
			goto fail;
		}

		NET_LOCK();
		PF_LOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io == NULL) {
			error = EINVAL;
			goto fail;
		}

		NET_LOCK();
		PF_LOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCSETREASS: {
		u_int32_t *reass = (u_int32_t *)addr;

		NET_LOCK();
		PF_LOCK();
		pf_trans_set.reass = *reass;
		pf_trans_set.mask |= PF_TSET_REASS;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCSETSYNFLWATS: {
		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;

		NET_LOCK();
		PF_LOCK();
		error = pf_syncookies_setwats(io->hiwat, io->lowat);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETSYNFLWATS: {
		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;

		NET_LOCK();
		PF_LOCK();
		error = pf_syncookies_getwats(io);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCSETSYNCOOKIES: {
		u_int8_t *mode = (u_int8_t *)addr;

		NET_LOCK();
		PF_LOCK();
		error = pf_syncookies_setmode(*mode);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	if (flags & FWRITE)
		rw_exit_write(&pfioctl_rw);
	else
		rw_exit_read(&pfioctl_rw);

	return (error);
}
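
/*
 * Status settings changed via DIOCSETSTATUSIF, DIOCSETDEBUG,
 * DIOCSETHOSTID and DIOCSETREASS are staged in pf_trans_set and only
 * applied to the live pf_status here, from the DIOCXCOMMIT path; the
 * PF_TSET_* bits record which fields the pending transaction touched.
 */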

void
pf_trans_set_commit(void)
{
	if (pf_trans_set.mask & PF_TSET_STATUSIF)
		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
	if (pf_trans_set.mask & PF_TSET_DEBUG)
		pf_status.debug = pf_trans_set.debug;
	if (pf_trans_set.mask & PF_TSET_HOSTID)
		pf_status.hostid = pf_trans_set.hostid;
	if (pf_trans_set.mask & PF_TSET_REASS)
		pf_status.reass = pf_trans_set.reass;
}

void
pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
{
	memmove(to, from, sizeof(*to));
	to->kif = NULL;
	to->addr.p.tbl = NULL;
}

int
pf_validate_range(u_int8_t op, u_int16_t port[2], int order)
{
	u_int16_t a = (order == PF_ORDER_NET) ? ntohs(port[0]) : port[0];
	u_int16_t b = (order == PF_ORDER_NET) ? ntohs(port[1]) : port[1];

	if ((op == PF_OP_RRG && a > b) ||	/* 34:12,  i.e. none */
	    (op == PF_OP_IRG && a >= b) ||	/* 34><12, i.e. none */
	    (op == PF_OP_XRG && a > b))		/* 34<>22, i.e. all */
		return 1;
	return 0;
}
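
/*
 * Worked example: for a rule with "port 34:12" (PF_OP_RRG), a == 34
 * and b == 12, so a > b and pf_validate_range() returns 1; the range
 * matches nothing and the callers in pf_rule_copyin() below reject the
 * rule with EINVAL.  Ports taken from a rule are in network byte
 * order, hence the ntohs() conversion for PF_ORDER_NET.
 */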

int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to)
{
	int i;

	if (from->scrub_flags & PFSTATE_SETPRIO &&
	    (from->set_prio[0] > IFQ_MAXPRIO ||
	    from->set_prio[1] > IFQ_MAXPRIO))
		return (EINVAL);

	to->src = from->src;
	to->src.addr.p.tbl = NULL;
	to->dst = from->dst;
	to->dst.addr.p.tbl = NULL;

	if (pf_validate_range(to->src.port_op, to->src.port, PF_ORDER_NET))
		return (EINVAL);
	if (pf_validate_range(to->dst.port_op, to->dst.port, PF_ORDER_NET))
		return (EINVAL);

	/* XXX union skip[] */

	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	if (pf_validate_range(to->rdr.port_op, to->rdr.proxy_port,
	    PF_ORDER_HOST))
		return (EINVAL);

	to->kif = (to->ifname[0]) ?
	    pfi_kif_alloc(to->ifname, M_WAITOK) : NULL;
	to->rcv_kif = (to->rcv_ifname[0]) ?
	    pfi_kif_alloc(to->rcv_ifname, M_WAITOK) : NULL;
	to->rdr.kif = (to->rdr.ifname[0]) ?
	    pfi_kif_alloc(to->rdr.ifname, M_WAITOK) : NULL;
	to->nat.kif = (to->nat.ifname[0]) ?
	    pfi_kif_alloc(to->nat.ifname, M_WAITOK) : NULL;
	to->route.kif = (to->route.ifname[0]) ?
	    pfi_kif_alloc(to->route.ifname, M_WAITOK) : NULL;

	to->os_fingerprint = from->os_fingerprint;

	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain != -1 && (to->onrdomain < 0 ||
	    to->onrdomain > RT_TABLEID_MAX))
		return (EINVAL);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
	pf_init_threshold(&to->pktrate, from->pktrate.limit,
	    from->pktrate.seconds);

	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->delay = from->delay;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert.type = from->divert.type;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}
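
/*
 * pf_rule_checkaf() enforces the address-family invariants for
 * "af-to" (NAT64-style) rules: PFRULE_AFTO requires an explicit
 * source family with the opposite target family in r->naf, and a
 * rule without PFRULE_AFTO must not name a target family at all.
 */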

int
pf_rule_checkaf(struct pf_rule *r)
{
	switch (r->af) {
	case 0:
		if (r->rule_flag & PFRULE_AFTO)
			return (EPFNOSUPPORT);
		break;
	case AF_INET:
		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
			return (EPFNOSUPPORT);
		break;
#ifdef INET6
	case AF_INET6:
		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
			return (EPFNOSUPPORT);
		break;
#endif /* INET6 */
	default:
		return (EPFNOSUPPORT);
	}

	if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
		return (EPFNOSUPPORT);

	return (0);
}

int
pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	struct pf_status pfs;

	NET_RLOCK_IN_IOCTL();
	PF_LOCK();
	memcpy(&pfs, &pf_status, sizeof(struct pf_status));
	pfi_update_status(pfs.ifname, &pfs);
	PF_UNLOCK();
	NET_RUNLOCK_IN_IOCTL();

	return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
}