1 /* $OpenBSD: pf_ioctl.c,v 1.343 2019/02/18 13:11:44 bluhm Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 35 * 36 */ 37 38 #include "pfsync.h" 39 #include "pflog.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/mbuf.h> 44 #include <sys/filio.h> 45 #include <sys/fcntl.h> 46 #include <sys/socket.h> 47 #include <sys/socketvar.h> 48 #include <sys/kernel.h> 49 #include <sys/time.h> 50 #include <sys/timeout.h> 51 #include <sys/pool.h> 52 #include <sys/malloc.h> 53 #include <sys/kthread.h> 54 #include <sys/rwlock.h> 55 #include <sys/syslog.h> 56 #include <uvm/uvm_extern.h> 57 58 #include <crypto/md5.h> 59 60 #include <net/if.h> 61 #include <net/if_var.h> 62 #include <net/route.h> 63 #include <net/hfsc.h> 64 #include <net/fq_codel.h> 65 66 #include <netinet/in.h> 67 #include <netinet/ip.h> 68 #include <netinet/in_pcb.h> 69 #include <netinet/ip_var.h> 70 #include <netinet/ip_icmp.h> 71 #include <netinet/tcp.h> 72 #include <netinet/udp.h> 73 74 #ifdef INET6 75 #include <netinet/ip6.h> 76 #include <netinet/icmp6.h> 77 #endif /* INET6 */ 78 79 #include <net/pfvar.h> 80 #include <net/pfvar_priv.h> 81 82 #if NPFSYNC > 0 83 #include <netinet/ip_ipsp.h> 84 #include <net/if_pfsync.h> 85 #endif /* NPFSYNC > 0 */ 86 87 struct pool pf_tag_pl; 88 89 void pfattach(int); 90 void pf_thread_create(void *); 91 int pfopen(dev_t, int, int, struct proc *); 92 int pfclose(dev_t, int, int, struct proc *); 93 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *); 94 int pf_begin_rules(u_int32_t *, const char *); 95 int pf_rollback_rules(u_int32_t, char *); 96 void pf_remove_queues(void); 97 int pf_commit_queues(void); 98 void pf_free_queues(struct pf_queuehead *); 99 int pf_setup_pfsync_matching(struct pf_ruleset *); 100 void pf_hash_rule(MD5_CTX *, struct pf_rule *); 101 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 102 int pf_commit_rules(u_int32_t, char *); 103 int pf_addr_setup(struct pf_ruleset *, 104 struct pf_addr_wrap *, sa_family_t); 105 int pf_kif_setup(char *, struct pfi_kif **); 106 void pf_addr_copyout(struct pf_addr_wrap *); 107 void pf_trans_set_commit(void); 108 void pf_pool_copyin(struct pf_pool *, struct pf_pool *); 109 int pf_rule_copyin(struct pf_rule *, struct pf_rule *, 110 struct pf_ruleset *); 111 u_int16_t pf_qname2qid(char *, int); 112 void pf_qid2qname(u_int16_t, char *); 113 void pf_qid_unref(u_int16_t); 114 115 struct pf_rule pf_default_rule, pf_default_rule_new; 116 117 struct { 118 char statusif[IFNAMSIZ]; 119 u_int32_t debug; 120 u_int32_t hostid; 121 u_int32_t reass; 122 u_int32_t mask; 123 } pf_trans_set; 124 125 #define PF_TSET_STATUSIF 0x01 126 #define PF_TSET_DEBUG 0x02 127 #define PF_TSET_HOSTID 0x04 128 #define PF_TSET_REASS 0x08 129 130 #define TAGID_MAX 50000 131 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 132 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 133 134 #ifdef WITH_PF_LOCK 135 /* 136 * pf_lock protects consistency of PF data structures, which don't have 137 * their dedicated lock yet. The pf_lock currently protects: 138 * - rules, 139 * - radix tables, 140 * - source nodes 141 * All callers must grab pf_lock exclusively. 142 * 143 * pf_state_lock protects consistency of state table. Packets, which do state 144 * look up grab the lock as readers. If packet must create state, then it must 145 * grab the lock as writer. Whenever packet creates state it grabs pf_lock 146 * first then it locks pf_state_lock as the writer. 147 */ 148 struct rwlock pf_lock = RWLOCK_INITIALIZER("pf_lock"); 149 struct rwlock pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock"); 150 #endif /* WITH_PF_LOCK */ 151 152 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 153 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 154 #endif 155 u_int16_t tagname2tag(struct pf_tags *, char *, int); 156 void tag2tagname(struct pf_tags *, u_int16_t, char *); 157 void tag_unref(struct pf_tags *, u_int16_t); 158 int pf_rtlabel_add(struct pf_addr_wrap *); 159 void pf_rtlabel_remove(struct pf_addr_wrap *); 160 void pf_rtlabel_copyout(struct pf_addr_wrap *); 161 162 163 void 164 pfattach(int num) 165 { 166 u_int32_t *timeout = pf_default_rule.timeout; 167 168 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 169 IPL_SOFTNET, 0, "pfrule", NULL); 170 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 171 IPL_SOFTNET, 0, "pfsrctr", NULL); 172 pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0, 173 IPL_SOFTNET, 0, "pfsnitem", NULL); 174 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 175 IPL_SOFTNET, 0, "pfstate", NULL); 176 pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 177 IPL_SOFTNET, 0, "pfstkey", NULL); 178 pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 179 IPL_SOFTNET, 0, "pfstitem", NULL); 180 pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 181 IPL_SOFTNET, 0, "pfruleitem", NULL); 182 pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0, 183 IPL_SOFTNET, 0, "pfqueue", NULL); 184 pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0, 185 IPL_SOFTNET, 0, "pftag", NULL); 186 pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0, 187 IPL_SOFTNET, 0, "pfpktdelay", NULL); 188 189 hfsc_initialize(); 190 pfr_initialize(); 191 pfi_initialize(); 192 pf_osfp_initialize(); 193 pf_syncookies_init(); 194 195 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp, 196 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0); 197 198 if (physmem <= atop(100*1024*1024)) 199 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = 200 PFR_KENTRY_HIWAT_SMALL; 201 202 RB_INIT(&tree_src_tracking); 203 RB_INIT(&pf_anchors); 204 pf_init_ruleset(&pf_main_ruleset); 205 TAILQ_INIT(&pf_queues[0]); 206 TAILQ_INIT(&pf_queues[1]); 207 pf_queues_active = &pf_queues[0]; 208 pf_queues_inactive = &pf_queues[1]; 209 TAILQ_INIT(&state_list); 210 211 /* default rule should never be garbage collected */ 212 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 213 pf_default_rule.action = PF_PASS; 214 pf_default_rule.nr = (u_int32_t)-1; 215 pf_default_rule.rtableid = -1; 216 217 /* initialize default timeouts */ 218 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 219 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 220 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 221 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 222 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 223 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 224 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 225 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 226 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 227 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 228 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 229 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 230 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 231 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 232 timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 233 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 234 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 235 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 236 timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 237 timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 238 239 pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK; 240 pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK; 241 pf_default_rule.rdr.addr.type = PF_ADDR_NONE; 242 pf_default_rule.nat.addr.type = PF_ADDR_NONE; 243 pf_default_rule.route.addr.type = PF_ADDR_NONE; 244 245 pf_normalize_init(); 246 memset(&pf_status, 0, sizeof(pf_status)); 247 pf_status.debug = LOG_ERR; 248 pf_status.reass = PF_REASS_ENABLED; 249 250 /* XXX do our best to avoid a conflict */ 251 pf_status.hostid = arc4random(); 252 } 253 254 int 255 pfopen(dev_t dev, int flags, int fmt, struct proc *p) 256 { 257 if (minor(dev) >= 1) 258 return (ENXIO); 259 return (0); 260 } 261 262 int 263 pfclose(dev_t dev, int flags, int fmt, struct proc *p) 264 { 265 if (minor(dev) >= 1) 266 return (ENXIO); 267 return (0); 268 } 269 270 void 271 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) 272 { 273 if (rulequeue != NULL) { 274 if (rule->states_cur == 0 && rule->src_nodes == 0) { 275 /* 276 * XXX - we need to remove the table *before* detaching 277 * the rule to make sure the table code does not delete 278 * the anchor under our feet. 279 */ 280 pf_tbladdr_remove(&rule->src.addr); 281 pf_tbladdr_remove(&rule->dst.addr); 282 pf_tbladdr_remove(&rule->rdr.addr); 283 pf_tbladdr_remove(&rule->nat.addr); 284 pf_tbladdr_remove(&rule->route.addr); 285 if (rule->overload_tbl) 286 pfr_detach_table(rule->overload_tbl); 287 } 288 TAILQ_REMOVE(rulequeue, rule, entries); 289 rule->entries.tqe_prev = NULL; 290 rule->nr = (u_int32_t)-1; 291 } 292 293 if (rule->states_cur > 0 || rule->src_nodes > 0 || 294 rule->entries.tqe_prev != NULL) 295 return; 296 pf_tag_unref(rule->tag); 297 pf_tag_unref(rule->match_tag); 298 pf_rtlabel_remove(&rule->src.addr); 299 pf_rtlabel_remove(&rule->dst.addr); 300 pfi_dynaddr_remove(&rule->src.addr); 301 pfi_dynaddr_remove(&rule->dst.addr); 302 pfi_dynaddr_remove(&rule->rdr.addr); 303 pfi_dynaddr_remove(&rule->nat.addr); 304 pfi_dynaddr_remove(&rule->route.addr); 305 if (rulequeue == NULL) { 306 pf_tbladdr_remove(&rule->src.addr); 307 pf_tbladdr_remove(&rule->dst.addr); 308 pf_tbladdr_remove(&rule->rdr.addr); 309 pf_tbladdr_remove(&rule->nat.addr); 310 pf_tbladdr_remove(&rule->route.addr); 311 if (rule->overload_tbl) 312 pfr_detach_table(rule->overload_tbl); 313 } 314 pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE); 315 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE); 316 pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE); 317 pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE); 318 pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE); 319 pf_remove_anchor(rule); 320 pool_put(&pf_rule_pl, rule); 321 } 322 323 void 324 pf_purge_rule(struct pf_rule *rule) 325 { 326 u_int32_t nr = 0; 327 struct pf_ruleset *ruleset; 328 329 KASSERT((rule != NULL) && (rule->ruleset != NULL)); 330 ruleset = rule->ruleset; 331 332 pf_rm_rule(ruleset->rules.active.ptr, rule); 333 ruleset->rules.active.rcount--; 334 TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries) 335 rule->nr = nr++; 336 ruleset->rules.active.ticket++; 337 pf_calc_skip_steps(ruleset->rules.active.ptr); 338 pf_remove_if_empty_ruleset(ruleset); 339 } 340 341 u_int16_t 342 tagname2tag(struct pf_tags *head, char *tagname, int create) 343 { 344 struct pf_tagname *tag, *p = NULL; 345 u_int16_t new_tagid = 1; 346 347 TAILQ_FOREACH(tag, head, entries) 348 if (strcmp(tagname, tag->name) == 0) { 349 tag->ref++; 350 return (tag->tag); 351 } 352 353 if (!create) 354 return (0); 355 356 /* 357 * to avoid fragmentation, we do a linear search from the beginning 358 * and take the first free slot we find. if there is none or the list 359 * is empty, append a new entry at the end. 360 */ 361 362 /* new entry */ 363 TAILQ_FOREACH(p, head, entries) { 364 if (p->tag != new_tagid) 365 break; 366 new_tagid = p->tag + 1; 367 } 368 369 if (new_tagid > TAGID_MAX) 370 return (0); 371 372 /* allocate and fill new struct pf_tagname */ 373 tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO); 374 if (tag == NULL) 375 return (0); 376 strlcpy(tag->name, tagname, sizeof(tag->name)); 377 tag->tag = new_tagid; 378 tag->ref++; 379 380 if (p != NULL) /* insert new entry before p */ 381 TAILQ_INSERT_BEFORE(p, tag, entries); 382 else /* either list empty or no free slot in between */ 383 TAILQ_INSERT_TAIL(head, tag, entries); 384 385 return (tag->tag); 386 } 387 388 void 389 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) 390 { 391 struct pf_tagname *tag; 392 393 TAILQ_FOREACH(tag, head, entries) 394 if (tag->tag == tagid) { 395 strlcpy(p, tag->name, PF_TAG_NAME_SIZE); 396 return; 397 } 398 } 399 400 void 401 tag_unref(struct pf_tags *head, u_int16_t tag) 402 { 403 struct pf_tagname *p, *next; 404 405 if (tag == 0) 406 return; 407 408 TAILQ_FOREACH_SAFE(p, head, entries, next) { 409 if (tag == p->tag) { 410 if (--p->ref == 0) { 411 TAILQ_REMOVE(head, p, entries); 412 pool_put(&pf_tag_pl, p); 413 } 414 break; 415 } 416 } 417 } 418 419 u_int16_t 420 pf_tagname2tag(char *tagname, int create) 421 { 422 return (tagname2tag(&pf_tags, tagname, create)); 423 } 424 425 void 426 pf_tag2tagname(u_int16_t tagid, char *p) 427 { 428 tag2tagname(&pf_tags, tagid, p); 429 } 430 431 void 432 pf_tag_ref(u_int16_t tag) 433 { 434 struct pf_tagname *t; 435 436 TAILQ_FOREACH(t, &pf_tags, entries) 437 if (t->tag == tag) 438 break; 439 if (t != NULL) 440 t->ref++; 441 } 442 443 void 444 pf_tag_unref(u_int16_t tag) 445 { 446 tag_unref(&pf_tags, tag); 447 } 448 449 int 450 pf_rtlabel_add(struct pf_addr_wrap *a) 451 { 452 if (a->type == PF_ADDR_RTLABEL && 453 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0) 454 return (-1); 455 return (0); 456 } 457 458 void 459 pf_rtlabel_remove(struct pf_addr_wrap *a) 460 { 461 if (a->type == PF_ADDR_RTLABEL) 462 rtlabel_unref(a->v.rtlabel); 463 } 464 465 void 466 pf_rtlabel_copyout(struct pf_addr_wrap *a) 467 { 468 const char *name; 469 470 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) { 471 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL) 472 strlcpy(a->v.rtlabelname, "?", 473 sizeof(a->v.rtlabelname)); 474 else 475 strlcpy(a->v.rtlabelname, name, 476 sizeof(a->v.rtlabelname)); 477 } 478 } 479 480 u_int16_t 481 pf_qname2qid(char *qname, int create) 482 { 483 return (tagname2tag(&pf_qids, qname, create)); 484 } 485 486 void 487 pf_qid2qname(u_int16_t qid, char *p) 488 { 489 tag2tagname(&pf_qids, qid, p); 490 } 491 492 void 493 pf_qid_unref(u_int16_t qid) 494 { 495 tag_unref(&pf_qids, (u_int16_t)qid); 496 } 497 498 int 499 pf_begin_rules(u_int32_t *ticket, const char *anchor) 500 { 501 struct pf_ruleset *rs; 502 struct pf_rule *rule; 503 504 if ((rs = pf_find_or_create_ruleset(anchor)) == NULL) 505 return (EINVAL); 506 while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) { 507 pf_rm_rule(rs->rules.inactive.ptr, rule); 508 rs->rules.inactive.rcount--; 509 } 510 *ticket = ++rs->rules.inactive.ticket; 511 rs->rules.inactive.open = 1; 512 return (0); 513 } 514 515 int 516 pf_rollback_rules(u_int32_t ticket, char *anchor) 517 { 518 struct pf_ruleset *rs; 519 struct pf_rule *rule; 520 521 rs = pf_find_ruleset(anchor); 522 if (rs == NULL || !rs->rules.inactive.open || 523 rs->rules.inactive.ticket != ticket) 524 return (0); 525 while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) { 526 pf_rm_rule(rs->rules.inactive.ptr, rule); 527 rs->rules.inactive.rcount--; 528 } 529 rs->rules.inactive.open = 0; 530 531 /* queue defs only in the main ruleset */ 532 if (anchor[0]) 533 return (0); 534 535 pf_free_queues(pf_queues_inactive); 536 537 return (0); 538 } 539 540 void 541 pf_free_queues(struct pf_queuehead *where) 542 { 543 struct pf_queuespec *q, *qtmp; 544 545 TAILQ_FOREACH_SAFE(q, where, entries, qtmp) { 546 TAILQ_REMOVE(where, q, entries); 547 pfi_kif_unref(q->kif, PFI_KIF_REF_RULE); 548 pool_put(&pf_queue_pl, q); 549 } 550 } 551 552 void 553 pf_remove_queues(void) 554 { 555 struct pf_queuespec *q; 556 struct ifnet *ifp; 557 558 /* put back interfaces in normal queueing mode */ 559 TAILQ_FOREACH(q, pf_queues_active, entries) { 560 if (q->parent_qid != 0) 561 continue; 562 563 ifp = q->kif->pfik_ifp; 564 if (ifp == NULL) 565 continue; 566 567 ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL); 568 } 569 } 570 571 struct pf_queue_if { 572 struct ifnet *ifp; 573 const struct ifq_ops *ifqops; 574 const struct pfq_ops *pfqops; 575 void *disc; 576 struct pf_queue_if *next; 577 }; 578 579 static inline struct pf_queue_if * 580 pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp) 581 { 582 struct pf_queue_if *qif = list; 583 584 while (qif != NULL) { 585 if (qif->ifp == ifp) 586 return (qif); 587 588 qif = qif->next; 589 } 590 591 return (qif); 592 } 593 594 int 595 pf_create_queues(void) 596 { 597 struct pf_queuespec *q; 598 struct ifnet *ifp; 599 struct pf_queue_if *list = NULL, *qif; 600 int error; 601 602 /* 603 * Find root queues and allocate traffic conditioner 604 * private data for these interfaces 605 */ 606 TAILQ_FOREACH(q, pf_queues_active, entries) { 607 if (q->parent_qid != 0) 608 continue; 609 610 ifp = q->kif->pfik_ifp; 611 if (ifp == NULL) 612 continue; 613 614 qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK); 615 qif->ifp = ifp; 616 617 if (q->flags & PFQS_ROOTCLASS) { 618 qif->ifqops = ifq_hfsc_ops; 619 qif->pfqops = pfq_hfsc_ops; 620 } else { 621 qif->ifqops = ifq_fqcodel_ops; 622 qif->pfqops = pfq_fqcodel_ops; 623 } 624 625 qif->disc = qif->pfqops->pfq_alloc(ifp); 626 627 qif->next = list; 628 list = qif; 629 } 630 631 /* and now everything */ 632 TAILQ_FOREACH(q, pf_queues_active, entries) { 633 ifp = q->kif->pfik_ifp; 634 if (ifp == NULL) 635 continue; 636 637 qif = pf_ifp2q(list, ifp); 638 KASSERT(qif != NULL); 639 640 error = qif->pfqops->pfq_addqueue(qif->disc, q); 641 if (error != 0) 642 goto error; 643 } 644 645 /* find root queues in old list to disable them if necessary */ 646 TAILQ_FOREACH(q, pf_queues_inactive, entries) { 647 if (q->parent_qid != 0) 648 continue; 649 650 ifp = q->kif->pfik_ifp; 651 if (ifp == NULL) 652 continue; 653 654 qif = pf_ifp2q(list, ifp); 655 if (qif != NULL) 656 continue; 657 658 ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL); 659 } 660 661 /* commit the new queues */ 662 while (list != NULL) { 663 qif = list; 664 list = qif->next; 665 666 ifp = qif->ifp; 667 668 ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc); 669 free(qif, M_TEMP, sizeof(*qif)); 670 } 671 672 return (0); 673 674 error: 675 while (list != NULL) { 676 qif = list; 677 list = qif->next; 678 679 qif->pfqops->pfq_free(qif->disc); 680 free(qif, M_TEMP, sizeof(*qif)); 681 } 682 683 return (error); 684 } 685 686 int 687 pf_commit_queues(void) 688 { 689 struct pf_queuehead *qswap; 690 int error; 691 692 /* swap */ 693 qswap = pf_queues_active; 694 pf_queues_active = pf_queues_inactive; 695 pf_queues_inactive = qswap; 696 697 error = pf_create_queues(); 698 if (error != 0) { 699 pf_queues_inactive = pf_queues_active; 700 pf_queues_active = qswap; 701 return (error); 702 } 703 704 pf_free_queues(pf_queues_inactive); 705 706 return (0); 707 } 708 709 const struct pfq_ops * 710 pf_queue_manager(struct pf_queuespec *q) 711 { 712 if (q->flags & PFQS_FLOWQUEUE) 713 return pfq_fqcodel_ops; 714 return (/* pfq_default_ops */ NULL); 715 } 716 717 #define PF_MD5_UPD(st, elm) \ 718 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) 719 720 #define PF_MD5_UPD_STR(st, elm) \ 721 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) 722 723 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ 724 (stor) = htonl((st)->elm); \ 725 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ 726 } while (0) 727 728 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ 729 (stor) = htons((st)->elm); \ 730 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ 731 } while (0) 732 733 void 734 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) 735 { 736 PF_MD5_UPD(pfr, addr.type); 737 switch (pfr->addr.type) { 738 case PF_ADDR_DYNIFTL: 739 PF_MD5_UPD(pfr, addr.v.ifname); 740 PF_MD5_UPD(pfr, addr.iflags); 741 break; 742 case PF_ADDR_TABLE: 743 PF_MD5_UPD(pfr, addr.v.tblname); 744 break; 745 case PF_ADDR_ADDRMASK: 746 /* XXX ignore af? */ 747 PF_MD5_UPD(pfr, addr.v.a.addr.addr32); 748 PF_MD5_UPD(pfr, addr.v.a.mask.addr32); 749 break; 750 case PF_ADDR_RTLABEL: 751 PF_MD5_UPD(pfr, addr.v.rtlabelname); 752 break; 753 } 754 755 PF_MD5_UPD(pfr, port[0]); 756 PF_MD5_UPD(pfr, port[1]); 757 PF_MD5_UPD(pfr, neg); 758 PF_MD5_UPD(pfr, port_op); 759 } 760 761 void 762 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule) 763 { 764 u_int16_t x; 765 u_int32_t y; 766 767 pf_hash_rule_addr(ctx, &rule->src); 768 pf_hash_rule_addr(ctx, &rule->dst); 769 PF_MD5_UPD_STR(rule, label); 770 PF_MD5_UPD_STR(rule, ifname); 771 PF_MD5_UPD_STR(rule, rcv_ifname); 772 PF_MD5_UPD_STR(rule, match_tagname); 773 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */ 774 PF_MD5_UPD_HTONL(rule, os_fingerprint, y); 775 PF_MD5_UPD_HTONL(rule, prob, y); 776 PF_MD5_UPD_HTONL(rule, uid.uid[0], y); 777 PF_MD5_UPD_HTONL(rule, uid.uid[1], y); 778 PF_MD5_UPD(rule, uid.op); 779 PF_MD5_UPD_HTONL(rule, gid.gid[0], y); 780 PF_MD5_UPD_HTONL(rule, gid.gid[1], y); 781 PF_MD5_UPD(rule, gid.op); 782 PF_MD5_UPD_HTONL(rule, rule_flag, y); 783 PF_MD5_UPD(rule, action); 784 PF_MD5_UPD(rule, direction); 785 PF_MD5_UPD(rule, af); 786 PF_MD5_UPD(rule, quick); 787 PF_MD5_UPD(rule, ifnot); 788 PF_MD5_UPD(rule, rcvifnot); 789 PF_MD5_UPD(rule, match_tag_not); 790 PF_MD5_UPD(rule, keep_state); 791 PF_MD5_UPD(rule, proto); 792 PF_MD5_UPD(rule, type); 793 PF_MD5_UPD(rule, code); 794 PF_MD5_UPD(rule, flags); 795 PF_MD5_UPD(rule, flagset); 796 PF_MD5_UPD(rule, allow_opts); 797 PF_MD5_UPD(rule, rt); 798 PF_MD5_UPD(rule, tos); 799 } 800 801 int 802 pf_commit_rules(u_int32_t ticket, char *anchor) 803 { 804 struct pf_ruleset *rs; 805 struct pf_rule *rule, **old_array; 806 struct pf_rulequeue *old_rules; 807 int error; 808 u_int32_t old_rcount; 809 810 /* Make sure any expired rules get removed from active rules first. */ 811 pf_purge_expired_rules(); 812 813 rs = pf_find_ruleset(anchor); 814 if (rs == NULL || !rs->rules.inactive.open || 815 ticket != rs->rules.inactive.ticket) 816 return (EBUSY); 817 818 /* Calculate checksum for the main ruleset */ 819 if (rs == &pf_main_ruleset) { 820 error = pf_setup_pfsync_matching(rs); 821 if (error != 0) 822 return (error); 823 } 824 825 /* Swap rules, keep the old. */ 826 old_rules = rs->rules.active.ptr; 827 old_rcount = rs->rules.active.rcount; 828 old_array = rs->rules.active.ptr_array; 829 830 rs->rules.active.ptr = rs->rules.inactive.ptr; 831 rs->rules.active.ptr_array = rs->rules.inactive.ptr_array; 832 rs->rules.active.rcount = rs->rules.inactive.rcount; 833 rs->rules.inactive.ptr = old_rules; 834 rs->rules.inactive.ptr_array = old_array; 835 rs->rules.inactive.rcount = old_rcount; 836 837 rs->rules.active.ticket = rs->rules.inactive.ticket; 838 pf_calc_skip_steps(rs->rules.active.ptr); 839 840 841 /* Purge the old rule list. */ 842 while ((rule = TAILQ_FIRST(old_rules)) != NULL) 843 pf_rm_rule(old_rules, rule); 844 if (rs->rules.inactive.ptr_array) 845 free(rs->rules.inactive.ptr_array, M_TEMP, 0); 846 rs->rules.inactive.ptr_array = NULL; 847 rs->rules.inactive.rcount = 0; 848 rs->rules.inactive.open = 0; 849 pf_remove_if_empty_ruleset(rs); 850 851 /* queue defs only in the main ruleset */ 852 if (anchor[0]) 853 return (0); 854 return (pf_commit_queues()); 855 } 856 857 int 858 pf_setup_pfsync_matching(struct pf_ruleset *rs) 859 { 860 MD5_CTX ctx; 861 struct pf_rule *rule; 862 u_int8_t digest[PF_MD5_DIGEST_LENGTH]; 863 864 MD5Init(&ctx); 865 if (rs->rules.inactive.ptr_array) 866 free(rs->rules.inactive.ptr_array, M_TEMP, 0); 867 rs->rules.inactive.ptr_array = NULL; 868 869 if (rs->rules.inactive.rcount) { 870 rs->rules.inactive.ptr_array = 871 mallocarray(rs->rules.inactive.rcount, sizeof(caddr_t), 872 M_TEMP, M_NOWAIT); 873 874 if (!rs->rules.inactive.ptr_array) 875 return (ENOMEM); 876 877 TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) { 878 pf_hash_rule(&ctx, rule); 879 (rs->rules.inactive.ptr_array)[rule->nr] = rule; 880 } 881 } 882 883 MD5Final(digest, &ctx); 884 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum)); 885 return (0); 886 } 887 888 int 889 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr, 890 sa_family_t af) 891 { 892 if (pfi_dynaddr_setup(addr, af) || 893 pf_tbladdr_setup(ruleset, addr) || 894 pf_rtlabel_add(addr)) 895 return (EINVAL); 896 897 return (0); 898 } 899 900 int 901 pf_kif_setup(char *ifname, struct pfi_kif **kif) 902 { 903 if (ifname[0]) { 904 *kif = pfi_kif_get(ifname); 905 if (*kif == NULL) 906 return (EINVAL); 907 908 pfi_kif_ref(*kif, PFI_KIF_REF_RULE); 909 } else 910 *kif = NULL; 911 912 return (0); 913 } 914 915 void 916 pf_addr_copyout(struct pf_addr_wrap *addr) 917 { 918 pfi_dynaddr_copyout(addr); 919 pf_tbladdr_copyout(addr); 920 pf_rtlabel_copyout(addr); 921 } 922 923 int 924 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) 925 { 926 int error = 0; 927 928 /* XXX keep in sync with switch() below */ 929 if (securelevel > 1) 930 switch (cmd) { 931 case DIOCGETRULES: 932 case DIOCGETRULE: 933 case DIOCGETSTATE: 934 case DIOCSETSTATUSIF: 935 case DIOCGETSTATUS: 936 case DIOCCLRSTATUS: 937 case DIOCNATLOOK: 938 case DIOCSETDEBUG: 939 case DIOCGETSTATES: 940 case DIOCGETTIMEOUT: 941 case DIOCGETLIMIT: 942 case DIOCGETRULESETS: 943 case DIOCGETRULESET: 944 case DIOCGETQUEUES: 945 case DIOCGETQUEUE: 946 case DIOCGETQSTATS: 947 case DIOCRGETTABLES: 948 case DIOCRGETTSTATS: 949 case DIOCRCLRTSTATS: 950 case DIOCRCLRADDRS: 951 case DIOCRADDADDRS: 952 case DIOCRDELADDRS: 953 case DIOCRSETADDRS: 954 case DIOCRGETADDRS: 955 case DIOCRGETASTATS: 956 case DIOCRCLRASTATS: 957 case DIOCRTSTADDRS: 958 case DIOCOSFPGET: 959 case DIOCGETSRCNODES: 960 case DIOCCLRSRCNODES: 961 case DIOCIGETIFACES: 962 case DIOCSETIFFLAG: 963 case DIOCCLRIFFLAG: 964 case DIOCGETSYNFLWATS: 965 break; 966 case DIOCRCLRTABLES: 967 case DIOCRADDTABLES: 968 case DIOCRDELTABLES: 969 case DIOCRSETTFLAGS: 970 if (((struct pfioc_table *)addr)->pfrio_flags & 971 PFR_FLAG_DUMMY) 972 break; /* dummy operation ok */ 973 return (EPERM); 974 default: 975 return (EPERM); 976 } 977 978 if (!(flags & FWRITE)) 979 switch (cmd) { 980 case DIOCGETRULES: 981 case DIOCGETSTATE: 982 case DIOCGETSTATUS: 983 case DIOCGETSTATES: 984 case DIOCGETTIMEOUT: 985 case DIOCGETLIMIT: 986 case DIOCGETRULESETS: 987 case DIOCGETRULESET: 988 case DIOCGETQUEUES: 989 case DIOCGETQUEUE: 990 case DIOCGETQSTATS: 991 case DIOCNATLOOK: 992 case DIOCRGETTABLES: 993 case DIOCRGETTSTATS: 994 case DIOCRGETADDRS: 995 case DIOCRGETASTATS: 996 case DIOCRTSTADDRS: 997 case DIOCOSFPGET: 998 case DIOCGETSRCNODES: 999 case DIOCIGETIFACES: 1000 case DIOCGETSYNFLWATS: 1001 break; 1002 case DIOCRCLRTABLES: 1003 case DIOCRADDTABLES: 1004 case DIOCRDELTABLES: 1005 case DIOCRCLRTSTATS: 1006 case DIOCRCLRADDRS: 1007 case DIOCRADDADDRS: 1008 case DIOCRDELADDRS: 1009 case DIOCRSETADDRS: 1010 case DIOCRSETTFLAGS: 1011 if (((struct pfioc_table *)addr)->pfrio_flags & 1012 PFR_FLAG_DUMMY) { 1013 flags |= FWRITE; /* need write lock for dummy */ 1014 break; /* dummy operation ok */ 1015 } 1016 return (EACCES); 1017 case DIOCGETRULE: 1018 if (((struct pfioc_rule *)addr)->action == 1019 PF_GET_CLR_CNTR) 1020 return (EACCES); 1021 break; 1022 default: 1023 return (EACCES); 1024 } 1025 1026 NET_LOCK(); 1027 switch (cmd) { 1028 1029 case DIOCSTART: 1030 PF_LOCK(); 1031 if (pf_status.running) 1032 error = EEXIST; 1033 else { 1034 pf_status.running = 1; 1035 pf_status.since = time_uptime; 1036 if (pf_status.stateid == 0) { 1037 pf_status.stateid = time_second; 1038 pf_status.stateid = pf_status.stateid << 32; 1039 } 1040 timeout_add_sec(&pf_purge_to, 1); 1041 pf_create_queues(); 1042 DPFPRINTF(LOG_NOTICE, "pf: started"); 1043 } 1044 PF_UNLOCK(); 1045 break; 1046 1047 case DIOCSTOP: 1048 PF_LOCK(); 1049 if (!pf_status.running) 1050 error = ENOENT; 1051 else { 1052 pf_status.running = 0; 1053 pf_status.since = time_uptime; 1054 pf_remove_queues(); 1055 DPFPRINTF(LOG_NOTICE, "pf: stopped"); 1056 } 1057 PF_UNLOCK(); 1058 break; 1059 1060 case DIOCGETQUEUES: { 1061 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1062 struct pf_queuespec *qs; 1063 u_int32_t nr = 0; 1064 1065 PF_LOCK(); 1066 pq->ticket = pf_main_ruleset.rules.active.ticket; 1067 1068 /* save state to not run over them all each time? */ 1069 qs = TAILQ_FIRST(pf_queues_active); 1070 while (qs != NULL) { 1071 qs = TAILQ_NEXT(qs, entries); 1072 nr++; 1073 } 1074 pq->nr = nr; 1075 PF_UNLOCK(); 1076 break; 1077 } 1078 1079 case DIOCGETQUEUE: { 1080 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1081 struct pf_queuespec *qs; 1082 u_int32_t nr = 0; 1083 1084 PF_LOCK(); 1085 if (pq->ticket != pf_main_ruleset.rules.active.ticket) { 1086 error = EBUSY; 1087 PF_UNLOCK(); 1088 break; 1089 } 1090 1091 /* save state to not run over them all each time? */ 1092 qs = TAILQ_FIRST(pf_queues_active); 1093 while ((qs != NULL) && (nr++ < pq->nr)) 1094 qs = TAILQ_NEXT(qs, entries); 1095 if (qs == NULL) { 1096 error = EBUSY; 1097 PF_UNLOCK(); 1098 break; 1099 } 1100 memcpy(&pq->queue, qs, sizeof(pq->queue)); 1101 PF_UNLOCK(); 1102 break; 1103 } 1104 1105 case DIOCGETQSTATS: { 1106 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 1107 struct pf_queuespec *qs; 1108 u_int32_t nr; 1109 int nbytes; 1110 1111 PF_LOCK(); 1112 if (pq->ticket != pf_main_ruleset.rules.active.ticket) { 1113 error = EBUSY; 1114 PF_UNLOCK(); 1115 break; 1116 } 1117 nbytes = pq->nbytes; 1118 nr = 0; 1119 1120 /* save state to not run over them all each time? */ 1121 qs = TAILQ_FIRST(pf_queues_active); 1122 while ((qs != NULL) && (nr++ < pq->nr)) 1123 qs = TAILQ_NEXT(qs, entries); 1124 if (qs == NULL) { 1125 error = EBUSY; 1126 PF_UNLOCK(); 1127 break; 1128 } 1129 memcpy(&pq->queue, qs, sizeof(pq->queue)); 1130 /* It's a root flow queue but is not an HFSC root class */ 1131 if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 && 1132 !(qs->flags & PFQS_ROOTCLASS)) 1133 error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf, 1134 &nbytes); 1135 else 1136 error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf, 1137 &nbytes); 1138 if (error == 0) 1139 pq->nbytes = nbytes; 1140 PF_UNLOCK(); 1141 break; 1142 } 1143 1144 case DIOCADDQUEUE: { 1145 struct pfioc_queue *q = (struct pfioc_queue *)addr; 1146 struct pf_queuespec *qs; 1147 1148 PF_LOCK(); 1149 if (q->ticket != pf_main_ruleset.rules.inactive.ticket) { 1150 error = EBUSY; 1151 PF_UNLOCK(); 1152 break; 1153 } 1154 qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1155 if (qs == NULL) { 1156 error = ENOMEM; 1157 PF_UNLOCK(); 1158 break; 1159 } 1160 memcpy(qs, &q->queue, sizeof(*qs)); 1161 qs->qid = pf_qname2qid(qs->qname, 1); 1162 if (qs->qid == 0) { 1163 pool_put(&pf_queue_pl, qs); 1164 error = EBUSY; 1165 PF_UNLOCK(); 1166 break; 1167 } 1168 if (qs->parent[0] && (qs->parent_qid = 1169 pf_qname2qid(qs->parent, 0)) == 0) { 1170 pool_put(&pf_queue_pl, qs); 1171 error = ESRCH; 1172 PF_UNLOCK(); 1173 break; 1174 } 1175 qs->kif = pfi_kif_get(qs->ifname); 1176 if (qs->kif == NULL) { 1177 pool_put(&pf_queue_pl, qs); 1178 error = ESRCH; 1179 PF_UNLOCK(); 1180 break; 1181 } 1182 /* XXX resolve bw percentage specs */ 1183 pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE); 1184 1185 TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries); 1186 PF_UNLOCK(); 1187 1188 break; 1189 } 1190 1191 case DIOCADDRULE: { 1192 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1193 struct pf_ruleset *ruleset; 1194 struct pf_rule *rule, *tail; 1195 1196 PF_LOCK(); 1197 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1198 ruleset = pf_find_ruleset(pr->anchor); 1199 if (ruleset == NULL) { 1200 error = EINVAL; 1201 PF_UNLOCK(); 1202 break; 1203 } 1204 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1205 error = EINVAL; 1206 PF_UNLOCK(); 1207 break; 1208 } 1209 if (pr->ticket != ruleset->rules.inactive.ticket) { 1210 error = EBUSY; 1211 PF_UNLOCK(); 1212 break; 1213 } 1214 rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1215 if (rule == NULL) { 1216 error = ENOMEM; 1217 PF_UNLOCK(); 1218 break; 1219 } 1220 if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) { 1221 pf_rm_rule(NULL, rule); 1222 rule = NULL; 1223 PF_UNLOCK(); 1224 break; 1225 } 1226 rule->cuid = p->p_ucred->cr_ruid; 1227 rule->cpid = p->p_p->ps_pid; 1228 1229 switch (rule->af) { 1230 case 0: 1231 break; 1232 case AF_INET: 1233 break; 1234 #ifdef INET6 1235 case AF_INET6: 1236 break; 1237 #endif /* INET6 */ 1238 default: 1239 pf_rm_rule(NULL, rule); 1240 rule = NULL; 1241 error = EAFNOSUPPORT; 1242 PF_UNLOCK(); 1243 goto fail; 1244 } 1245 tail = TAILQ_LAST(ruleset->rules.inactive.ptr, 1246 pf_rulequeue); 1247 if (tail) 1248 rule->nr = tail->nr + 1; 1249 else 1250 rule->nr = 0; 1251 1252 if (rule->src.addr.type == PF_ADDR_NONE || 1253 rule->dst.addr.type == PF_ADDR_NONE) 1254 error = EINVAL; 1255 1256 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 1257 error = EINVAL; 1258 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 1259 error = EINVAL; 1260 if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af)) 1261 error = EINVAL; 1262 if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af)) 1263 error = EINVAL; 1264 if (pf_addr_setup(ruleset, &rule->route.addr, rule->af)) 1265 error = EINVAL; 1266 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1267 error = EINVAL; 1268 if (rule->rt && !rule->direction) 1269 error = EINVAL; 1270 if (rule->scrub_flags & PFSTATE_SETPRIO && 1271 (rule->set_prio[0] > IFQ_MAXPRIO || 1272 rule->set_prio[1] > IFQ_MAXPRIO)) 1273 error = EINVAL; 1274 1275 if (error) { 1276 pf_rm_rule(NULL, rule); 1277 PF_UNLOCK(); 1278 break; 1279 } 1280 TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr, 1281 rule, entries); 1282 rule->ruleset = ruleset; 1283 ruleset->rules.inactive.rcount++; 1284 PF_UNLOCK(); 1285 break; 1286 } 1287 1288 case DIOCGETRULES: { 1289 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1290 struct pf_ruleset *ruleset; 1291 struct pf_rule *tail; 1292 1293 PF_LOCK(); 1294 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1295 ruleset = pf_find_ruleset(pr->anchor); 1296 if (ruleset == NULL) { 1297 error = EINVAL; 1298 PF_UNLOCK(); 1299 break; 1300 } 1301 tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue); 1302 if (tail) 1303 pr->nr = tail->nr + 1; 1304 else 1305 pr->nr = 0; 1306 pr->ticket = ruleset->rules.active.ticket; 1307 PF_UNLOCK(); 1308 break; 1309 } 1310 1311 case DIOCGETRULE: { 1312 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1313 struct pf_ruleset *ruleset; 1314 struct pf_rule *rule; 1315 int i; 1316 1317 PF_LOCK(); 1318 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1319 ruleset = pf_find_ruleset(pr->anchor); 1320 if (ruleset == NULL) { 1321 error = EINVAL; 1322 PF_UNLOCK(); 1323 break; 1324 } 1325 if (pr->ticket != ruleset->rules.active.ticket) { 1326 error = EBUSY; 1327 PF_UNLOCK(); 1328 break; 1329 } 1330 rule = TAILQ_FIRST(ruleset->rules.active.ptr); 1331 while ((rule != NULL) && (rule->nr != pr->nr)) 1332 rule = TAILQ_NEXT(rule, entries); 1333 if (rule == NULL) { 1334 error = EBUSY; 1335 PF_UNLOCK(); 1336 break; 1337 } 1338 memcpy(&pr->rule, rule, sizeof(struct pf_rule)); 1339 memset(&pr->rule.entries, 0, sizeof(pr->rule.entries)); 1340 pr->rule.kif = NULL; 1341 pr->rule.nat.kif = NULL; 1342 pr->rule.rdr.kif = NULL; 1343 pr->rule.route.kif = NULL; 1344 pr->rule.rcv_kif = NULL; 1345 pr->rule.anchor = NULL; 1346 pr->rule.overload_tbl = NULL; 1347 pr->rule.pktrate.limit /= PF_THRESHOLD_MULT; 1348 memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle)); 1349 pr->rule.ruleset = NULL; 1350 if (pf_anchor_copyout(ruleset, rule, pr)) { 1351 error = EBUSY; 1352 PF_UNLOCK(); 1353 break; 1354 } 1355 pf_addr_copyout(&pr->rule.src.addr); 1356 pf_addr_copyout(&pr->rule.dst.addr); 1357 pf_addr_copyout(&pr->rule.rdr.addr); 1358 pf_addr_copyout(&pr->rule.nat.addr); 1359 pf_addr_copyout(&pr->rule.route.addr); 1360 for (i = 0; i < PF_SKIP_COUNT; ++i) 1361 if (rule->skip[i].ptr == NULL) 1362 pr->rule.skip[i].nr = (u_int32_t)-1; 1363 else 1364 pr->rule.skip[i].nr = 1365 rule->skip[i].ptr->nr; 1366 1367 if (pr->action == PF_GET_CLR_CNTR) { 1368 rule->evaluations = 0; 1369 rule->packets[0] = rule->packets[1] = 0; 1370 rule->bytes[0] = rule->bytes[1] = 0; 1371 rule->states_tot = 0; 1372 } 1373 PF_UNLOCK(); 1374 break; 1375 } 1376 1377 case DIOCCHANGERULE: { 1378 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1379 struct pf_ruleset *ruleset; 1380 struct pf_rule *oldrule = NULL, *newrule = NULL; 1381 u_int32_t nr = 0; 1382 1383 if (pcr->action < PF_CHANGE_ADD_HEAD || 1384 pcr->action > PF_CHANGE_GET_TICKET) { 1385 error = EINVAL; 1386 break; 1387 } 1388 PF_LOCK(); 1389 ruleset = pf_find_ruleset(pcr->anchor); 1390 if (ruleset == NULL) { 1391 error = EINVAL; 1392 PF_UNLOCK(); 1393 break; 1394 } 1395 1396 if (pcr->action == PF_CHANGE_GET_TICKET) { 1397 pcr->ticket = ++ruleset->rules.active.ticket; 1398 PF_UNLOCK(); 1399 break; 1400 } else { 1401 if (pcr->ticket != 1402 ruleset->rules.active.ticket) { 1403 error = EINVAL; 1404 PF_UNLOCK(); 1405 break; 1406 } 1407 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1408 error = EINVAL; 1409 PF_UNLOCK(); 1410 break; 1411 } 1412 } 1413 1414 if (pcr->action != PF_CHANGE_REMOVE) { 1415 newrule = pool_get(&pf_rule_pl, 1416 PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1417 if (newrule == NULL) { 1418 error = ENOMEM; 1419 PF_UNLOCK(); 1420 break; 1421 } 1422 pf_rule_copyin(&pcr->rule, newrule, ruleset); 1423 newrule->cuid = p->p_ucred->cr_ruid; 1424 newrule->cpid = p->p_p->ps_pid; 1425 1426 switch (newrule->af) { 1427 case 0: 1428 break; 1429 case AF_INET: 1430 break; 1431 #ifdef INET6 1432 case AF_INET6: 1433 break; 1434 #endif /* INET6 */ 1435 default: 1436 pf_rm_rule(NULL, newrule); 1437 error = EAFNOSUPPORT; 1438 PF_UNLOCK(); 1439 goto fail; 1440 } 1441 1442 if (newrule->rt && !newrule->direction) 1443 error = EINVAL; 1444 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 1445 error = EINVAL; 1446 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 1447 error = EINVAL; 1448 if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af)) 1449 error = EINVAL; 1450 if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af)) 1451 error = EINVAL; 1452 if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af)) 1453 error = EINVAL; 1454 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1455 error = EINVAL; 1456 1457 if (error) { 1458 pf_rm_rule(NULL, newrule); 1459 PF_UNLOCK(); 1460 break; 1461 } 1462 } 1463 1464 if (pcr->action == PF_CHANGE_ADD_HEAD) 1465 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1466 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1467 oldrule = TAILQ_LAST(ruleset->rules.active.ptr, 1468 pf_rulequeue); 1469 else { 1470 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1471 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1472 oldrule = TAILQ_NEXT(oldrule, entries); 1473 if (oldrule == NULL) { 1474 if (newrule != NULL) 1475 pf_rm_rule(NULL, newrule); 1476 error = EINVAL; 1477 PF_UNLOCK(); 1478 break; 1479 } 1480 } 1481 1482 if (pcr->action == PF_CHANGE_REMOVE) { 1483 pf_rm_rule(ruleset->rules.active.ptr, oldrule); 1484 ruleset->rules.active.rcount--; 1485 } else { 1486 if (oldrule == NULL) 1487 TAILQ_INSERT_TAIL( 1488 ruleset->rules.active.ptr, 1489 newrule, entries); 1490 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1491 pcr->action == PF_CHANGE_ADD_BEFORE) 1492 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1493 else 1494 TAILQ_INSERT_AFTER( 1495 ruleset->rules.active.ptr, 1496 oldrule, newrule, entries); 1497 ruleset->rules.active.rcount++; 1498 } 1499 1500 nr = 0; 1501 TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries) 1502 oldrule->nr = nr++; 1503 1504 ruleset->rules.active.ticket++; 1505 1506 pf_calc_skip_steps(ruleset->rules.active.ptr); 1507 pf_remove_if_empty_ruleset(ruleset); 1508 1509 PF_UNLOCK(); 1510 break; 1511 } 1512 1513 case DIOCCLRSTATES: { 1514 struct pf_state *s, *nexts; 1515 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1516 u_int killed = 0; 1517 1518 PF_LOCK(); 1519 PF_STATE_ENTER_WRITE(); 1520 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { 1521 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1522 1523 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1524 s->kif->pfik_name)) { 1525 #if NPFSYNC > 0 1526 /* don't send out individual delete messages */ 1527 SET(s->state_flags, PFSTATE_NOSYNC); 1528 #endif /* NPFSYNC > 0 */ 1529 pf_remove_state(s); 1530 killed++; 1531 } 1532 } 1533 PF_STATE_EXIT_WRITE(); 1534 psk->psk_killed = killed; 1535 #if NPFSYNC > 0 1536 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1537 #endif /* NPFSYNC > 0 */ 1538 PF_UNLOCK(); 1539 break; 1540 } 1541 1542 case DIOCKILLSTATES: { 1543 struct pf_state *s, *nexts; 1544 struct pf_state_item *si, *sit; 1545 struct pf_state_key *sk, key; 1546 struct pf_addr *srcaddr, *dstaddr; 1547 u_int16_t srcport, dstport; 1548 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1549 u_int i, killed = 0; 1550 const int dirs[] = { PF_IN, PF_OUT }; 1551 int sidx, didx; 1552 1553 if (psk->psk_pfcmp.id) { 1554 if (psk->psk_pfcmp.creatorid == 0) 1555 psk->psk_pfcmp.creatorid = pf_status.hostid; 1556 PF_LOCK(); 1557 PF_STATE_ENTER_WRITE(); 1558 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) { 1559 pf_remove_state(s); 1560 psk->psk_killed = 1; 1561 } 1562 PF_STATE_EXIT_WRITE(); 1563 PF_UNLOCK(); 1564 break; 1565 } 1566 1567 if (psk->psk_af && psk->psk_proto && 1568 psk->psk_src.port_op == PF_OP_EQ && 1569 psk->psk_dst.port_op == PF_OP_EQ) { 1570 1571 key.af = psk->psk_af; 1572 key.proto = psk->psk_proto; 1573 key.rdomain = psk->psk_rdomain; 1574 1575 PF_LOCK(); 1576 PF_STATE_ENTER_WRITE(); 1577 for (i = 0; i < nitems(dirs); i++) { 1578 if (dirs[i] == PF_IN) { 1579 sidx = 0; 1580 didx = 1; 1581 } else { 1582 sidx = 1; 1583 didx = 0; 1584 } 1585 pf_addrcpy(&key.addr[sidx], 1586 &psk->psk_src.addr.v.a.addr, key.af); 1587 pf_addrcpy(&key.addr[didx], 1588 &psk->psk_dst.addr.v.a.addr, key.af); 1589 key.port[sidx] = psk->psk_src.port[0]; 1590 key.port[didx] = psk->psk_dst.port[0]; 1591 1592 sk = RB_FIND(pf_state_tree, &pf_statetbl, &key); 1593 if (sk == NULL) 1594 continue; 1595 1596 TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit) 1597 if (((si->s->key[PF_SK_WIRE]->af == 1598 si->s->key[PF_SK_STACK]->af && 1599 sk == (dirs[i] == PF_IN ? 1600 si->s->key[PF_SK_WIRE] : 1601 si->s->key[PF_SK_STACK])) || 1602 (si->s->key[PF_SK_WIRE]->af != 1603 si->s->key[PF_SK_STACK]->af && 1604 dirs[i] == PF_IN && 1605 (sk == si->s->key[PF_SK_STACK] || 1606 sk == si->s->key[PF_SK_WIRE]))) && 1607 (!psk->psk_ifname[0] || 1608 (si->s->kif != pfi_all && 1609 !strcmp(psk->psk_ifname, 1610 si->s->kif->pfik_name)))) { 1611 pf_remove_state(si->s); 1612 killed++; 1613 } 1614 } 1615 if (killed) 1616 psk->psk_killed = killed; 1617 PF_STATE_EXIT_WRITE(); 1618 PF_UNLOCK(); 1619 break; 1620 } 1621 1622 PF_LOCK(); 1623 PF_STATE_ENTER_WRITE(); 1624 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; 1625 s = nexts) { 1626 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1627 1628 if (s->direction == PF_OUT) { 1629 sk = s->key[PF_SK_STACK]; 1630 srcaddr = &sk->addr[1]; 1631 dstaddr = &sk->addr[0]; 1632 srcport = sk->port[1]; 1633 dstport = sk->port[0]; 1634 } else { 1635 sk = s->key[PF_SK_WIRE]; 1636 srcaddr = &sk->addr[0]; 1637 dstaddr = &sk->addr[1]; 1638 srcport = sk->port[0]; 1639 dstport = sk->port[1]; 1640 } 1641 if ((!psk->psk_af || sk->af == psk->psk_af) 1642 && (!psk->psk_proto || psk->psk_proto == 1643 sk->proto) && psk->psk_rdomain == sk->rdomain && 1644 pf_match_addr(psk->psk_src.neg, 1645 &psk->psk_src.addr.v.a.addr, 1646 &psk->psk_src.addr.v.a.mask, 1647 srcaddr, sk->af) && 1648 pf_match_addr(psk->psk_dst.neg, 1649 &psk->psk_dst.addr.v.a.addr, 1650 &psk->psk_dst.addr.v.a.mask, 1651 dstaddr, sk->af) && 1652 (psk->psk_src.port_op == 0 || 1653 pf_match_port(psk->psk_src.port_op, 1654 psk->psk_src.port[0], psk->psk_src.port[1], 1655 srcport)) && 1656 (psk->psk_dst.port_op == 0 || 1657 pf_match_port(psk->psk_dst.port_op, 1658 psk->psk_dst.port[0], psk->psk_dst.port[1], 1659 dstport)) && 1660 (!psk->psk_label[0] || (s->rule.ptr->label[0] && 1661 !strcmp(psk->psk_label, s->rule.ptr->label))) && 1662 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1663 s->kif->pfik_name))) { 1664 pf_remove_state(s); 1665 killed++; 1666 } 1667 } 1668 psk->psk_killed = killed; 1669 PF_STATE_EXIT_WRITE(); 1670 PF_UNLOCK(); 1671 break; 1672 } 1673 1674 #if NPFSYNC > 0 1675 case DIOCADDSTATE: { 1676 struct pfioc_state *ps = (struct pfioc_state *)addr; 1677 struct pfsync_state *sp = &ps->state; 1678 1679 if (sp->timeout >= PFTM_MAX) { 1680 error = EINVAL; 1681 break; 1682 } 1683 PF_LOCK(); 1684 PF_STATE_ENTER_WRITE(); 1685 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL); 1686 PF_STATE_EXIT_WRITE(); 1687 PF_UNLOCK(); 1688 break; 1689 } 1690 #endif /* NPFSYNC > 0 */ 1691 1692 case DIOCGETSTATE: { 1693 struct pfioc_state *ps = (struct pfioc_state *)addr; 1694 struct pf_state *s; 1695 struct pf_state_cmp id_key; 1696 1697 memset(&id_key, 0, sizeof(id_key)); 1698 id_key.id = ps->state.id; 1699 id_key.creatorid = ps->state.creatorid; 1700 1701 PF_STATE_ENTER_READ(); 1702 s = pf_find_state_byid(&id_key); 1703 s = pf_state_ref(s); 1704 PF_STATE_EXIT_READ(); 1705 if (s == NULL) { 1706 error = ENOENT; 1707 break; 1708 } 1709 1710 pf_state_export(&ps->state, s); 1711 pf_state_unref(s); 1712 break; 1713 } 1714 1715 case DIOCGETSTATES: { 1716 struct pfioc_states *ps = (struct pfioc_states *)addr; 1717 struct pf_state *state; 1718 struct pfsync_state *p, *pstore; 1719 u_int32_t nr = 0; 1720 1721 if (ps->ps_len == 0) { 1722 nr = pf_status.states; 1723 ps->ps_len = sizeof(struct pfsync_state) * nr; 1724 break; 1725 } 1726 1727 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 1728 1729 p = ps->ps_states; 1730 1731 PF_STATE_ENTER_READ(); 1732 state = TAILQ_FIRST(&state_list); 1733 while (state) { 1734 if (state->timeout != PFTM_UNLINKED) { 1735 if ((nr+1) * sizeof(*p) > ps->ps_len) 1736 break; 1737 pf_state_export(pstore, state); 1738 error = copyout(pstore, p, sizeof(*p)); 1739 if (error) { 1740 free(pstore, M_TEMP, sizeof(*pstore)); 1741 PF_STATE_EXIT_READ(); 1742 goto fail; 1743 } 1744 p++; 1745 nr++; 1746 } 1747 state = TAILQ_NEXT(state, entry_list); 1748 } 1749 PF_STATE_EXIT_READ(); 1750 1751 ps->ps_len = sizeof(struct pfsync_state) * nr; 1752 1753 free(pstore, M_TEMP, sizeof(*pstore)); 1754 break; 1755 } 1756 1757 case DIOCGETSTATUS: { 1758 struct pf_status *s = (struct pf_status *)addr; 1759 PF_LOCK(); 1760 memcpy(s, &pf_status, sizeof(struct pf_status)); 1761 pfi_update_status(s->ifname, s); 1762 PF_UNLOCK(); 1763 break; 1764 } 1765 1766 case DIOCSETSTATUSIF: { 1767 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1768 1769 PF_LOCK(); 1770 if (pi->pfiio_name[0] == 0) { 1771 memset(pf_status.ifname, 0, IFNAMSIZ); 1772 PF_UNLOCK(); 1773 break; 1774 } 1775 strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ); 1776 pf_trans_set.mask |= PF_TSET_STATUSIF; 1777 PF_UNLOCK(); 1778 break; 1779 } 1780 1781 case DIOCCLRSTATUS: { 1782 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1783 1784 PF_LOCK(); 1785 /* if ifname is specified, clear counters there only */ 1786 if (pi->pfiio_name[0]) { 1787 pfi_update_status(pi->pfiio_name, NULL); 1788 PF_UNLOCK(); 1789 break; 1790 } 1791 1792 memset(pf_status.counters, 0, sizeof(pf_status.counters)); 1793 memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters)); 1794 memset(pf_status.scounters, 0, sizeof(pf_status.scounters)); 1795 pf_status.since = time_uptime; 1796 1797 PF_UNLOCK(); 1798 break; 1799 } 1800 1801 case DIOCNATLOOK: { 1802 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1803 struct pf_state_key *sk; 1804 struct pf_state *state; 1805 struct pf_state_key_cmp key; 1806 int m = 0, direction = pnl->direction; 1807 int sidx, didx; 1808 1809 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 1810 sidx = (direction == PF_IN) ? 1 : 0; 1811 didx = (direction == PF_IN) ? 0 : 1; 1812 1813 if (!pnl->proto || 1814 PF_AZERO(&pnl->saddr, pnl->af) || 1815 PF_AZERO(&pnl->daddr, pnl->af) || 1816 ((pnl->proto == IPPROTO_TCP || 1817 pnl->proto == IPPROTO_UDP) && 1818 (!pnl->dport || !pnl->sport)) || 1819 pnl->rdomain > RT_TABLEID_MAX) 1820 error = EINVAL; 1821 else { 1822 key.af = pnl->af; 1823 key.proto = pnl->proto; 1824 key.rdomain = pnl->rdomain; 1825 pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af); 1826 key.port[sidx] = pnl->sport; 1827 pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af); 1828 key.port[didx] = pnl->dport; 1829 1830 PF_STATE_ENTER_READ(); 1831 state = pf_find_state_all(&key, direction, &m); 1832 state = pf_state_ref(state); 1833 PF_STATE_EXIT_READ(); 1834 1835 if (m > 1) 1836 error = E2BIG; /* more than one state */ 1837 else if (state != NULL) { 1838 sk = state->key[sidx]; 1839 pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx], 1840 sk->af); 1841 pnl->rsport = sk->port[sidx]; 1842 pf_addrcpy(&pnl->rdaddr, &sk->addr[didx], 1843 sk->af); 1844 pnl->rdport = sk->port[didx]; 1845 pnl->rrdomain = sk->rdomain; 1846 } else 1847 error = ENOENT; 1848 pf_state_unref(state); 1849 } 1850 break; 1851 } 1852 1853 case DIOCSETTIMEOUT: { 1854 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1855 1856 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1857 pt->seconds < 0) { 1858 error = EINVAL; 1859 goto fail; 1860 } 1861 PF_LOCK(); 1862 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 1863 pt->seconds = 1; 1864 pf_default_rule_new.timeout[pt->timeout] = pt->seconds; 1865 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1866 PF_UNLOCK(); 1867 break; 1868 } 1869 1870 case DIOCGETTIMEOUT: { 1871 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1872 1873 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1874 error = EINVAL; 1875 goto fail; 1876 } 1877 PF_LOCK(); 1878 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1879 PF_UNLOCK(); 1880 break; 1881 } 1882 1883 case DIOCGETLIMIT: { 1884 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1885 1886 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1887 error = EINVAL; 1888 goto fail; 1889 } 1890 PF_LOCK(); 1891 pl->limit = pf_pool_limits[pl->index].limit; 1892 PF_UNLOCK(); 1893 break; 1894 } 1895 1896 case DIOCSETLIMIT: { 1897 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1898 1899 PF_LOCK(); 1900 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1901 pf_pool_limits[pl->index].pp == NULL) { 1902 error = EINVAL; 1903 PF_UNLOCK(); 1904 goto fail; 1905 } 1906 if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout > 1907 pl->limit) { 1908 error = EBUSY; 1909 PF_UNLOCK(); 1910 goto fail; 1911 } 1912 /* Fragments reference mbuf clusters. */ 1913 if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) { 1914 error = EINVAL; 1915 PF_UNLOCK(); 1916 goto fail; 1917 } 1918 1919 pf_pool_limits[pl->index].limit_new = pl->limit; 1920 pl->limit = pf_pool_limits[pl->index].limit; 1921 PF_UNLOCK(); 1922 break; 1923 } 1924 1925 case DIOCSETDEBUG: { 1926 u_int32_t *level = (u_int32_t *)addr; 1927 1928 PF_LOCK(); 1929 pf_trans_set.debug = *level; 1930 pf_trans_set.mask |= PF_TSET_DEBUG; 1931 PF_UNLOCK(); 1932 break; 1933 } 1934 1935 case DIOCGETRULESETS: { 1936 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 1937 struct pf_ruleset *ruleset; 1938 struct pf_anchor *anchor; 1939 1940 PF_LOCK(); 1941 pr->path[sizeof(pr->path) - 1] = 0; 1942 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 1943 error = EINVAL; 1944 PF_UNLOCK(); 1945 break; 1946 } 1947 pr->nr = 0; 1948 if (ruleset == &pf_main_ruleset) { 1949 /* XXX kludge for pf_main_ruleset */ 1950 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 1951 if (anchor->parent == NULL) 1952 pr->nr++; 1953 } else { 1954 RB_FOREACH(anchor, pf_anchor_node, 1955 &ruleset->anchor->children) 1956 pr->nr++; 1957 } 1958 PF_UNLOCK(); 1959 break; 1960 } 1961 1962 case DIOCGETRULESET: { 1963 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 1964 struct pf_ruleset *ruleset; 1965 struct pf_anchor *anchor; 1966 u_int32_t nr = 0; 1967 1968 PF_LOCK(); 1969 pr->path[sizeof(pr->path) - 1] = 0; 1970 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 1971 error = EINVAL; 1972 PF_UNLOCK(); 1973 break; 1974 } 1975 pr->name[0] = 0; 1976 if (ruleset == &pf_main_ruleset) { 1977 /* XXX kludge for pf_main_ruleset */ 1978 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 1979 if (anchor->parent == NULL && nr++ == pr->nr) { 1980 strlcpy(pr->name, anchor->name, 1981 sizeof(pr->name)); 1982 PF_UNLOCK(); 1983 break; 1984 } 1985 } else { 1986 RB_FOREACH(anchor, pf_anchor_node, 1987 &ruleset->anchor->children) 1988 if (nr++ == pr->nr) { 1989 strlcpy(pr->name, anchor->name, 1990 sizeof(pr->name)); 1991 PF_UNLOCK(); 1992 break; 1993 } 1994 } 1995 if (!pr->name[0]) 1996 error = EBUSY; 1997 PF_UNLOCK(); 1998 break; 1999 } 2000 2001 case DIOCRCLRTABLES: { 2002 struct pfioc_table *io = (struct pfioc_table *)addr; 2003 2004 if (io->pfrio_esize != 0) { 2005 error = ENODEV; 2006 break; 2007 } 2008 PF_LOCK(); 2009 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2010 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2011 PF_UNLOCK(); 2012 break; 2013 } 2014 2015 case DIOCRADDTABLES: { 2016 struct pfioc_table *io = (struct pfioc_table *)addr; 2017 2018 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2019 error = ENODEV; 2020 break; 2021 } 2022 PF_LOCK(); 2023 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2024 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2025 PF_UNLOCK(); 2026 break; 2027 } 2028 2029 case DIOCRDELTABLES: { 2030 struct pfioc_table *io = (struct pfioc_table *)addr; 2031 2032 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2033 error = ENODEV; 2034 break; 2035 } 2036 PF_LOCK(); 2037 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2038 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2039 PF_UNLOCK(); 2040 break; 2041 } 2042 2043 case DIOCRGETTABLES: { 2044 struct pfioc_table *io = (struct pfioc_table *)addr; 2045 2046 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2047 error = ENODEV; 2048 break; 2049 } 2050 PF_LOCK(); 2051 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2052 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2053 PF_UNLOCK(); 2054 break; 2055 } 2056 2057 case DIOCRGETTSTATS: { 2058 struct pfioc_table *io = (struct pfioc_table *)addr; 2059 2060 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2061 error = ENODEV; 2062 break; 2063 } 2064 PF_LOCK(); 2065 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2066 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2067 PF_UNLOCK(); 2068 break; 2069 } 2070 2071 case DIOCRCLRTSTATS: { 2072 struct pfioc_table *io = (struct pfioc_table *)addr; 2073 2074 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2075 error = ENODEV; 2076 break; 2077 } 2078 PF_LOCK(); 2079 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2080 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2081 PF_UNLOCK(); 2082 break; 2083 } 2084 2085 case DIOCRSETTFLAGS: { 2086 struct pfioc_table *io = (struct pfioc_table *)addr; 2087 2088 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2089 error = ENODEV; 2090 break; 2091 } 2092 PF_LOCK(); 2093 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2094 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2095 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2096 PF_UNLOCK(); 2097 break; 2098 } 2099 2100 case DIOCRCLRADDRS: { 2101 struct pfioc_table *io = (struct pfioc_table *)addr; 2102 2103 if (io->pfrio_esize != 0) { 2104 error = ENODEV; 2105 break; 2106 } 2107 PF_LOCK(); 2108 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2109 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2110 PF_UNLOCK(); 2111 break; 2112 } 2113 2114 case DIOCRADDADDRS: { 2115 struct pfioc_table *io = (struct pfioc_table *)addr; 2116 2117 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2118 error = ENODEV; 2119 break; 2120 } 2121 PF_LOCK(); 2122 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2123 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2124 PFR_FLAG_USERIOCTL); 2125 PF_UNLOCK(); 2126 break; 2127 } 2128 2129 case DIOCRDELADDRS: { 2130 struct pfioc_table *io = (struct pfioc_table *)addr; 2131 2132 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2133 error = ENODEV; 2134 break; 2135 } 2136 PF_LOCK(); 2137 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2138 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2139 PFR_FLAG_USERIOCTL); 2140 PF_UNLOCK(); 2141 break; 2142 } 2143 2144 case DIOCRSETADDRS: { 2145 struct pfioc_table *io = (struct pfioc_table *)addr; 2146 2147 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2148 error = ENODEV; 2149 break; 2150 } 2151 PF_LOCK(); 2152 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2153 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2154 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2155 PFR_FLAG_USERIOCTL, 0); 2156 PF_UNLOCK(); 2157 break; 2158 } 2159 2160 case DIOCRGETADDRS: { 2161 struct pfioc_table *io = (struct pfioc_table *)addr; 2162 2163 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2164 error = ENODEV; 2165 break; 2166 } 2167 PF_LOCK(); 2168 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2169 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2170 PF_UNLOCK(); 2171 break; 2172 } 2173 2174 case DIOCRGETASTATS: { 2175 struct pfioc_table *io = (struct pfioc_table *)addr; 2176 2177 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2178 error = ENODEV; 2179 break; 2180 } 2181 PF_LOCK(); 2182 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2183 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2184 PF_UNLOCK(); 2185 break; 2186 } 2187 2188 case DIOCRCLRASTATS: { 2189 struct pfioc_table *io = (struct pfioc_table *)addr; 2190 2191 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2192 error = ENODEV; 2193 break; 2194 } 2195 PF_LOCK(); 2196 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2197 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2198 PFR_FLAG_USERIOCTL); 2199 PF_UNLOCK(); 2200 break; 2201 } 2202 2203 case DIOCRTSTADDRS: { 2204 struct pfioc_table *io = (struct pfioc_table *)addr; 2205 2206 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2207 error = ENODEV; 2208 break; 2209 } 2210 PF_LOCK(); 2211 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2212 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2213 PFR_FLAG_USERIOCTL); 2214 PF_UNLOCK(); 2215 break; 2216 } 2217 2218 case DIOCRINADEFINE: { 2219 struct pfioc_table *io = (struct pfioc_table *)addr; 2220 2221 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2222 error = ENODEV; 2223 break; 2224 } 2225 PF_LOCK(); 2226 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2227 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2228 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2229 PF_UNLOCK(); 2230 break; 2231 } 2232 2233 case DIOCOSFPADD: { 2234 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2235 PF_LOCK(); 2236 error = pf_osfp_add(io); 2237 PF_UNLOCK(); 2238 break; 2239 } 2240 2241 case DIOCOSFPGET: { 2242 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2243 PF_LOCK(); 2244 error = pf_osfp_get(io); 2245 PF_UNLOCK(); 2246 break; 2247 } 2248 2249 case DIOCXBEGIN: { 2250 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2251 struct pfioc_trans_e *ioe; 2252 struct pfr_table *table; 2253 int i; 2254 2255 if (io->esize != sizeof(*ioe)) { 2256 error = ENODEV; 2257 goto fail; 2258 } 2259 PF_LOCK(); 2260 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2261 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2262 pf_default_rule_new = pf_default_rule; 2263 memset(&pf_trans_set, 0, sizeof(pf_trans_set)); 2264 for (i = 0; i < io->size; i++) { 2265 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2266 free(table, M_TEMP, sizeof(*table)); 2267 free(ioe, M_TEMP, sizeof(*ioe)); 2268 error = EFAULT; 2269 PF_UNLOCK(); 2270 goto fail; 2271 } 2272 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2273 sizeof(ioe->anchor)) { 2274 free(table, M_TEMP, sizeof(*table)); 2275 free(ioe, M_TEMP, sizeof(*ioe)); 2276 error = ENAMETOOLONG; 2277 PF_UNLOCK(); 2278 goto fail; 2279 } 2280 switch (ioe->type) { 2281 case PF_TRANS_TABLE: 2282 memset(table, 0, sizeof(*table)); 2283 strlcpy(table->pfrt_anchor, ioe->anchor, 2284 sizeof(table->pfrt_anchor)); 2285 if ((error = pfr_ina_begin(table, 2286 &ioe->ticket, NULL, 0))) { 2287 free(table, M_TEMP, sizeof(*table)); 2288 free(ioe, M_TEMP, sizeof(*ioe)); 2289 PF_UNLOCK(); 2290 goto fail; 2291 } 2292 break; 2293 case PF_TRANS_RULESET: 2294 if ((error = pf_begin_rules(&ioe->ticket, 2295 ioe->anchor))) { 2296 free(table, M_TEMP, sizeof(*table)); 2297 free(ioe, M_TEMP, sizeof(*ioe)); 2298 PF_UNLOCK(); 2299 goto fail; 2300 } 2301 break; 2302 default: 2303 free(table, M_TEMP, sizeof(*table)); 2304 free(ioe, M_TEMP, sizeof(*ioe)); 2305 error = EINVAL; 2306 PF_UNLOCK(); 2307 goto fail; 2308 } 2309 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 2310 free(table, M_TEMP, sizeof(*table)); 2311 free(ioe, M_TEMP, sizeof(*ioe)); 2312 error = EFAULT; 2313 PF_UNLOCK(); 2314 goto fail; 2315 } 2316 } 2317 free(table, M_TEMP, sizeof(*table)); 2318 free(ioe, M_TEMP, sizeof(*ioe)); 2319 PF_UNLOCK(); 2320 break; 2321 } 2322 2323 case DIOCXROLLBACK: { 2324 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2325 struct pfioc_trans_e *ioe; 2326 struct pfr_table *table; 2327 int i; 2328 2329 if (io->esize != sizeof(*ioe)) { 2330 error = ENODEV; 2331 goto fail; 2332 } 2333 PF_LOCK(); 2334 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2335 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2336 for (i = 0; i < io->size; i++) { 2337 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2338 free(table, M_TEMP, sizeof(*table)); 2339 free(ioe, M_TEMP, sizeof(*ioe)); 2340 error = EFAULT; 2341 PF_UNLOCK(); 2342 goto fail; 2343 } 2344 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2345 sizeof(ioe->anchor)) { 2346 free(table, M_TEMP, sizeof(*table)); 2347 free(ioe, M_TEMP, sizeof(*ioe)); 2348 error = ENAMETOOLONG; 2349 PF_UNLOCK(); 2350 goto fail; 2351 } 2352 switch (ioe->type) { 2353 case PF_TRANS_TABLE: 2354 memset(table, 0, sizeof(*table)); 2355 strlcpy(table->pfrt_anchor, ioe->anchor, 2356 sizeof(table->pfrt_anchor)); 2357 if ((error = pfr_ina_rollback(table, 2358 ioe->ticket, NULL, 0))) { 2359 free(table, M_TEMP, sizeof(*table)); 2360 free(ioe, M_TEMP, sizeof(*ioe)); 2361 PF_UNLOCK(); 2362 goto fail; /* really bad */ 2363 } 2364 break; 2365 case PF_TRANS_RULESET: 2366 if ((error = pf_rollback_rules(ioe->ticket, 2367 ioe->anchor))) { 2368 free(table, M_TEMP, sizeof(*table)); 2369 free(ioe, M_TEMP, sizeof(*ioe)); 2370 PF_UNLOCK(); 2371 goto fail; /* really bad */ 2372 } 2373 break; 2374 default: 2375 free(table, M_TEMP, sizeof(*table)); 2376 free(ioe, M_TEMP, sizeof(*ioe)); 2377 error = EINVAL; 2378 PF_UNLOCK(); 2379 goto fail; /* really bad */ 2380 } 2381 } 2382 free(table, M_TEMP, sizeof(*table)); 2383 free(ioe, M_TEMP, sizeof(*ioe)); 2384 PF_UNLOCK(); 2385 break; 2386 } 2387 2388 case DIOCXCOMMIT: { 2389 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2390 struct pfioc_trans_e *ioe; 2391 struct pfr_table *table; 2392 struct pf_ruleset *rs; 2393 int i; 2394 2395 if (io->esize != sizeof(*ioe)) { 2396 error = ENODEV; 2397 goto fail; 2398 } 2399 PF_LOCK(); 2400 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2401 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2402 /* first makes sure everything will succeed */ 2403 for (i = 0; i < io->size; i++) { 2404 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2405 free(table, M_TEMP, sizeof(*table)); 2406 free(ioe, M_TEMP, sizeof(*ioe)); 2407 error = EFAULT; 2408 PF_UNLOCK(); 2409 goto fail; 2410 } 2411 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2412 sizeof(ioe->anchor)) { 2413 free(table, M_TEMP, sizeof(*table)); 2414 free(ioe, M_TEMP, sizeof(*ioe)); 2415 error = ENAMETOOLONG; 2416 PF_UNLOCK(); 2417 goto fail; 2418 } 2419 switch (ioe->type) { 2420 case PF_TRANS_TABLE: 2421 rs = pf_find_ruleset(ioe->anchor); 2422 if (rs == NULL || !rs->topen || ioe->ticket != 2423 rs->tticket) { 2424 free(table, M_TEMP, sizeof(*table)); 2425 free(ioe, M_TEMP, sizeof(*ioe)); 2426 error = EBUSY; 2427 PF_UNLOCK(); 2428 goto fail; 2429 } 2430 break; 2431 case PF_TRANS_RULESET: 2432 rs = pf_find_ruleset(ioe->anchor); 2433 if (rs == NULL || 2434 !rs->rules.inactive.open || 2435 rs->rules.inactive.ticket != 2436 ioe->ticket) { 2437 free(table, M_TEMP, sizeof(*table)); 2438 free(ioe, M_TEMP, sizeof(*ioe)); 2439 error = EBUSY; 2440 PF_UNLOCK(); 2441 goto fail; 2442 } 2443 break; 2444 default: 2445 free(table, M_TEMP, sizeof(*table)); 2446 free(ioe, M_TEMP, sizeof(*ioe)); 2447 error = EINVAL; 2448 PF_UNLOCK(); 2449 goto fail; 2450 } 2451 } 2452 2453 /* 2454 * Checked already in DIOCSETLIMIT, but check again as the 2455 * situation might have changed. 2456 */ 2457 for (i = 0; i < PF_LIMIT_MAX; i++) { 2458 if (((struct pool *)pf_pool_limits[i].pp)->pr_nout > 2459 pf_pool_limits[i].limit_new) { 2460 free(table, M_TEMP, sizeof(*table)); 2461 free(ioe, M_TEMP, sizeof(*ioe)); 2462 error = EBUSY; 2463 PF_UNLOCK(); 2464 goto fail; 2465 } 2466 } 2467 /* now do the commit - no errors should happen here */ 2468 for (i = 0; i < io->size; i++) { 2469 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2470 free(table, M_TEMP, sizeof(*table)); 2471 free(ioe, M_TEMP, sizeof(*ioe)); 2472 error = EFAULT; 2473 PF_UNLOCK(); 2474 goto fail; 2475 } 2476 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2477 sizeof(ioe->anchor)) { 2478 free(table, M_TEMP, sizeof(*table)); 2479 free(ioe, M_TEMP, sizeof(*ioe)); 2480 error = ENAMETOOLONG; 2481 PF_UNLOCK(); 2482 goto fail; 2483 } 2484 switch (ioe->type) { 2485 case PF_TRANS_TABLE: 2486 memset(table, 0, sizeof(*table)); 2487 strlcpy(table->pfrt_anchor, ioe->anchor, 2488 sizeof(table->pfrt_anchor)); 2489 if ((error = pfr_ina_commit(table, ioe->ticket, 2490 NULL, NULL, 0))) { 2491 free(table, M_TEMP, sizeof(*table)); 2492 free(ioe, M_TEMP, sizeof(*ioe)); 2493 PF_UNLOCK(); 2494 goto fail; /* really bad */ 2495 } 2496 break; 2497 case PF_TRANS_RULESET: 2498 if ((error = pf_commit_rules(ioe->ticket, 2499 ioe->anchor))) { 2500 free(table, M_TEMP, sizeof(*table)); 2501 free(ioe, M_TEMP, sizeof(*ioe)); 2502 PF_UNLOCK(); 2503 goto fail; /* really bad */ 2504 } 2505 break; 2506 default: 2507 free(table, M_TEMP, sizeof(*table)); 2508 free(ioe, M_TEMP, sizeof(*ioe)); 2509 error = EINVAL; 2510 PF_UNLOCK(); 2511 goto fail; /* really bad */ 2512 } 2513 } 2514 for (i = 0; i < PF_LIMIT_MAX; i++) { 2515 if (pf_pool_limits[i].limit_new != 2516 pf_pool_limits[i].limit && 2517 pool_sethardlimit(pf_pool_limits[i].pp, 2518 pf_pool_limits[i].limit_new, NULL, 0) != 0) { 2519 free(table, M_TEMP, sizeof(*table)); 2520 free(ioe, M_TEMP, sizeof(*ioe)); 2521 error = EBUSY; 2522 PF_UNLOCK(); 2523 goto fail; /* really bad */ 2524 } 2525 pf_pool_limits[i].limit = pf_pool_limits[i].limit_new; 2526 } 2527 for (i = 0; i < PFTM_MAX; i++) { 2528 int old = pf_default_rule.timeout[i]; 2529 2530 pf_default_rule.timeout[i] = 2531 pf_default_rule_new.timeout[i]; 2532 if (pf_default_rule.timeout[i] == PFTM_INTERVAL && 2533 pf_default_rule.timeout[i] < old) 2534 task_add(net_tq(0), &pf_purge_task); 2535 } 2536 pfi_xcommit(); 2537 pf_trans_set_commit(); 2538 free(table, M_TEMP, sizeof(*table)); 2539 free(ioe, M_TEMP, sizeof(*ioe)); 2540 PF_UNLOCK(); 2541 break; 2542 } 2543 2544 case DIOCGETSRCNODES: { 2545 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 2546 struct pf_src_node *n, *p, *pstore; 2547 u_int32_t nr = 0; 2548 size_t space = psn->psn_len; 2549 2550 PF_LOCK(); 2551 if (space == 0) { 2552 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2553 nr++; 2554 psn->psn_len = sizeof(struct pf_src_node) * nr; 2555 PF_UNLOCK(); 2556 break; 2557 } 2558 2559 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 2560 2561 p = psn->psn_src_nodes; 2562 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2563 int secs = time_uptime, diff; 2564 2565 if ((nr + 1) * sizeof(*p) > psn->psn_len) 2566 break; 2567 2568 memcpy(pstore, n, sizeof(*pstore)); 2569 memset(&pstore->entry, 0, sizeof(pstore->entry)); 2570 pstore->rule.ptr = NULL; 2571 pstore->kif = NULL; 2572 pstore->rule.nr = n->rule.ptr->nr; 2573 pstore->creation = secs - pstore->creation; 2574 if (pstore->expire > secs) 2575 pstore->expire -= secs; 2576 else 2577 pstore->expire = 0; 2578 2579 /* adjust the connection rate estimate */ 2580 diff = secs - n->conn_rate.last; 2581 if (diff >= n->conn_rate.seconds) 2582 pstore->conn_rate.count = 0; 2583 else 2584 pstore->conn_rate.count -= 2585 n->conn_rate.count * diff / 2586 n->conn_rate.seconds; 2587 2588 error = copyout(pstore, p, sizeof(*p)); 2589 if (error) { 2590 free(pstore, M_TEMP, sizeof(*pstore)); 2591 PF_UNLOCK(); 2592 goto fail; 2593 } 2594 p++; 2595 nr++; 2596 } 2597 psn->psn_len = sizeof(struct pf_src_node) * nr; 2598 2599 free(pstore, M_TEMP, sizeof(*pstore)); 2600 PF_UNLOCK(); 2601 break; 2602 } 2603 2604 case DIOCCLRSRCNODES: { 2605 struct pf_src_node *n; 2606 struct pf_state *state; 2607 2608 PF_LOCK(); 2609 PF_STATE_ENTER_WRITE(); 2610 RB_FOREACH(state, pf_state_tree_id, &tree_id) 2611 pf_src_tree_remove_state(state); 2612 PF_STATE_EXIT_WRITE(); 2613 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2614 n->expire = 1; 2615 pf_purge_expired_src_nodes(); 2616 PF_UNLOCK(); 2617 break; 2618 } 2619 2620 case DIOCKILLSRCNODES: { 2621 struct pf_src_node *sn; 2622 struct pf_state *s; 2623 struct pfioc_src_node_kill *psnk = 2624 (struct pfioc_src_node_kill *)addr; 2625 u_int killed = 0; 2626 2627 PF_LOCK(); 2628 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 2629 if (pf_match_addr(psnk->psnk_src.neg, 2630 &psnk->psnk_src.addr.v.a.addr, 2631 &psnk->psnk_src.addr.v.a.mask, 2632 &sn->addr, sn->af) && 2633 pf_match_addr(psnk->psnk_dst.neg, 2634 &psnk->psnk_dst.addr.v.a.addr, 2635 &psnk->psnk_dst.addr.v.a.mask, 2636 &sn->raddr, sn->af)) { 2637 /* Handle state to src_node linkage */ 2638 if (sn->states != 0) { 2639 PF_ASSERT_LOCKED(); 2640 PF_STATE_ENTER_WRITE(); 2641 RB_FOREACH(s, pf_state_tree_id, 2642 &tree_id) 2643 pf_state_rm_src_node(s, sn); 2644 PF_STATE_EXIT_WRITE(); 2645 } 2646 sn->expire = 1; 2647 killed++; 2648 } 2649 } 2650 2651 if (killed > 0) 2652 pf_purge_expired_src_nodes(); 2653 2654 psnk->psnk_killed = killed; 2655 PF_UNLOCK(); 2656 break; 2657 } 2658 2659 case DIOCSETHOSTID: { 2660 u_int32_t *hostid = (u_int32_t *)addr; 2661 2662 PF_LOCK(); 2663 if (*hostid == 0) 2664 pf_trans_set.hostid = arc4random(); 2665 else 2666 pf_trans_set.hostid = *hostid; 2667 pf_trans_set.mask |= PF_TSET_HOSTID; 2668 PF_UNLOCK(); 2669 break; 2670 } 2671 2672 case DIOCOSFPFLUSH: 2673 PF_LOCK(); 2674 pf_osfp_flush(); 2675 PF_UNLOCK(); 2676 break; 2677 2678 case DIOCIGETIFACES: { 2679 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2680 2681 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 2682 error = ENODEV; 2683 break; 2684 } 2685 PF_LOCK(); 2686 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 2687 &io->pfiio_size); 2688 PF_UNLOCK(); 2689 break; 2690 } 2691 2692 case DIOCSETIFFLAG: { 2693 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2694 2695 PF_LOCK(); 2696 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 2697 PF_UNLOCK(); 2698 break; 2699 } 2700 2701 case DIOCCLRIFFLAG: { 2702 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2703 2704 PF_LOCK(); 2705 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 2706 PF_UNLOCK(); 2707 break; 2708 } 2709 2710 case DIOCSETREASS: { 2711 u_int32_t *reass = (u_int32_t *)addr; 2712 2713 PF_LOCK(); 2714 pf_trans_set.reass = *reass; 2715 pf_trans_set.mask |= PF_TSET_REASS; 2716 PF_UNLOCK(); 2717 break; 2718 } 2719 2720 case DIOCSETSYNFLWATS: { 2721 struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr; 2722 2723 PF_LOCK(); 2724 error = pf_syncookies_setwats(io->hiwat, io->lowat); 2725 PF_UNLOCK(); 2726 break; 2727 } 2728 2729 case DIOCGETSYNFLWATS: { 2730 struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr; 2731 2732 PF_LOCK(); 2733 error = pf_syncookies_getwats(io); 2734 PF_UNLOCK(); 2735 break; 2736 } 2737 2738 case DIOCSETSYNCOOKIES: { 2739 u_int8_t *mode = (u_int8_t *)addr; 2740 2741 PF_LOCK(); 2742 error = pf_syncookies_setmode(*mode); 2743 PF_UNLOCK(); 2744 break; 2745 } 2746 2747 default: 2748 error = ENODEV; 2749 break; 2750 } 2751 fail: 2752 NET_UNLOCK(); 2753 return (error); 2754 } 2755 2756 void 2757 pf_trans_set_commit(void) 2758 { 2759 if (pf_trans_set.mask & PF_TSET_STATUSIF) 2760 strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ); 2761 if (pf_trans_set.mask & PF_TSET_DEBUG) 2762 pf_status.debug = pf_trans_set.debug; 2763 if (pf_trans_set.mask & PF_TSET_HOSTID) 2764 pf_status.hostid = pf_trans_set.hostid; 2765 if (pf_trans_set.mask & PF_TSET_REASS) 2766 pf_status.reass = pf_trans_set.reass; 2767 } 2768 2769 void 2770 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to) 2771 { 2772 memmove(to, from, sizeof(*to)); 2773 to->kif = NULL; 2774 } 2775 2776 int 2777 pf_rule_copyin(struct pf_rule *from, struct pf_rule *to, 2778 struct pf_ruleset *ruleset) 2779 { 2780 int i; 2781 2782 to->src = from->src; 2783 to->dst = from->dst; 2784 2785 /* XXX union skip[] */ 2786 2787 strlcpy(to->label, from->label, sizeof(to->label)); 2788 strlcpy(to->ifname, from->ifname, sizeof(to->ifname)); 2789 strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname)); 2790 strlcpy(to->qname, from->qname, sizeof(to->qname)); 2791 strlcpy(to->pqname, from->pqname, sizeof(to->pqname)); 2792 strlcpy(to->tagname, from->tagname, sizeof(to->tagname)); 2793 strlcpy(to->match_tagname, from->match_tagname, 2794 sizeof(to->match_tagname)); 2795 strlcpy(to->overload_tblname, from->overload_tblname, 2796 sizeof(to->overload_tblname)); 2797 2798 pf_pool_copyin(&from->nat, &to->nat); 2799 pf_pool_copyin(&from->rdr, &to->rdr); 2800 pf_pool_copyin(&from->route, &to->route); 2801 2802 if (pf_kif_setup(to->ifname, &to->kif)) 2803 return (EINVAL); 2804 if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif)) 2805 return (EINVAL); 2806 if (to->overload_tblname[0]) { 2807 if ((to->overload_tbl = pfr_attach_table(ruleset, 2808 to->overload_tblname, 0)) == NULL) 2809 return (EINVAL); 2810 else 2811 to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; 2812 } 2813 2814 if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif)) 2815 return (EINVAL); 2816 if (pf_kif_setup(to->nat.ifname, &to->nat.kif)) 2817 return (EINVAL); 2818 if (pf_kif_setup(to->route.ifname, &to->route.kif)) 2819 return (EINVAL); 2820 2821 to->os_fingerprint = from->os_fingerprint; 2822 2823 to->rtableid = from->rtableid; 2824 if (to->rtableid >= 0 && !rtable_exists(to->rtableid)) 2825 return (EBUSY); 2826 to->onrdomain = from->onrdomain; 2827 if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain)) 2828 return (EBUSY); 2829 if (to->onrdomain >= 0) /* make sure it is a real rdomain */ 2830 to->onrdomain = rtable_l2(to->onrdomain); 2831 2832 for (i = 0; i < PFTM_MAX; i++) 2833 to->timeout[i] = from->timeout[i]; 2834 to->states_tot = from->states_tot; 2835 to->max_states = from->max_states; 2836 to->max_src_nodes = from->max_src_nodes; 2837 to->max_src_states = from->max_src_states; 2838 to->max_src_conn = from->max_src_conn; 2839 to->max_src_conn_rate.limit = from->max_src_conn_rate.limit; 2840 to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds; 2841 pf_init_threshold(&to->pktrate, from->pktrate.limit, 2842 from->pktrate.seconds); 2843 2844 if (to->qname[0] != 0) { 2845 if ((to->qid = pf_qname2qid(to->qname, 0)) == 0) 2846 return (EBUSY); 2847 if (to->pqname[0] != 0) { 2848 if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0) 2849 return (EBUSY); 2850 } else 2851 to->pqid = to->qid; 2852 } 2853 to->rt_listid = from->rt_listid; 2854 to->prob = from->prob; 2855 to->return_icmp = from->return_icmp; 2856 to->return_icmp6 = from->return_icmp6; 2857 to->max_mss = from->max_mss; 2858 if (to->tagname[0]) 2859 if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0) 2860 return (EBUSY); 2861 if (to->match_tagname[0]) 2862 if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0) 2863 return (EBUSY); 2864 to->scrub_flags = from->scrub_flags; 2865 to->uid = from->uid; 2866 to->gid = from->gid; 2867 to->rule_flag = from->rule_flag; 2868 to->action = from->action; 2869 to->direction = from->direction; 2870 to->log = from->log; 2871 to->logif = from->logif; 2872 #if NPFLOG > 0 2873 if (!to->log) 2874 to->logif = 0; 2875 #endif /* NPFLOG > 0 */ 2876 to->quick = from->quick; 2877 to->ifnot = from->ifnot; 2878 to->rcvifnot = from->rcvifnot; 2879 to->match_tag_not = from->match_tag_not; 2880 to->keep_state = from->keep_state; 2881 to->af = from->af; 2882 to->naf = from->naf; 2883 to->proto = from->proto; 2884 to->type = from->type; 2885 to->code = from->code; 2886 to->flags = from->flags; 2887 to->flagset = from->flagset; 2888 to->min_ttl = from->min_ttl; 2889 to->allow_opts = from->allow_opts; 2890 to->rt = from->rt; 2891 to->return_ttl = from->return_ttl; 2892 to->tos = from->tos; 2893 to->set_tos = from->set_tos; 2894 to->anchor_relative = from->anchor_relative; /* XXX */ 2895 to->anchor_wildcard = from->anchor_wildcard; /* XXX */ 2896 to->flush = from->flush; 2897 to->divert.addr = from->divert.addr; 2898 to->divert.port = from->divert.port; 2899 to->divert.type = from->divert.type; 2900 to->prio = from->prio; 2901 to->set_prio[0] = from->set_prio[0]; 2902 to->set_prio[1] = from->set_prio[1]; 2903 2904 return (0); 2905 } 2906