1 /* $OpenBSD: pf_ioctl.c,v 1.349 2020/02/18 12:13:40 mpi Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
 *
 */

/* device counts for conditionally compiled pseudo-devices */
#include "pfsync.h"
#include "pflog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <uvm/uvm_extern.h>

#include <crypto/md5.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/hfsc.h>
#include <net/fq_codel.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#endif /* INET6 */

#include <net/pfvar.h>
#include <net/pfvar_priv.h>

#if NPFSYNC > 0
#include <netinet/ip_ipsp.h>
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

/* backing pool for struct pf_tagname allocations (tags and queue ids) */
struct pool		 pf_tag_pl;

void			 pfattach(int);
void			 pf_thread_create(void *);
int			 pfopen(dev_t, int, int, struct proc *);
int			 pfclose(dev_t, int, int, struct proc *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
int			 pf_begin_rules(u_int32_t *, const char *);
int			 pf_rollback_rules(u_int32_t, char *);
void			 pf_remove_queues(void);
int			 pf_commit_queues(void);
void			 pf_free_queues(struct pf_queuehead *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, char *);
int			 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
int			 pf_kif_setup(char *, struct pfi_kif **);
void			 pf_addr_copyout(struct pf_addr_wrap *);
void			 pf_trans_set_commit(void);
void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
			    struct pf_ruleset *);
u_int16_t		 pf_qname2qid(char *, int);
void			 pf_qid2qname(u_int16_t, char *);
void			 pf_qid_unref(u_int16_t);

/* implicit catch-all rule, plus staging copy used during a transaction */
struct pf_rule		 pf_default_rule, pf_default_rule_new;

/*
 * Pending global option changes staged by ioctls during a transaction;
 * "mask" records which PF_TSET_* fields were set and must be committed
 * by pf_trans_set_commit().
 */
struct {
	char		statusif[IFNAMSIZ];
	u_int32_t	debug;
	u_int32_t	hostid;
	u_int32_t	reass;
	u_int32_t	mask;
} pf_trans_set;

#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

/* highest tag/queue id that tagname2tag() will hand out */
#define	TAGID_MAX	 50000
/* ref-counted name<->id mappings: packet tags and queue ids */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#ifdef WITH_PF_LOCK
/*
 * pf_lock protects consistency of PF data structures, which don't have
 * their dedicated lock yet. The pf_lock currently protects:
 *   - rules,
 *   - radix tables,
 *   - source nodes
 * All callers must grab pf_lock exclusively.
 *
 * pf_state_lock protects consistency of state table. Packets, which do state
 * look up grab the lock as readers. If packet must create state, then it must
 * grab the lock as writer. Whenever packet creates state it grabs pf_lock
 * first then it locks pf_state_lock as the writer.
 */
struct rwlock		 pf_lock = RWLOCK_INITIALIZER("pf_lock");
struct rwlock		 pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
#endif /* WITH_PF_LOCK */

/* both names feed the same tagname2tag() allocator, so sizes must agree */
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);


/*
 * One-time attach-time initialization: create the backing pools,
 * initialize the PF subsystems (queueing, tables, interfaces, OS
 * fingerprints, syncookies), and set up the implicit default pass
 * rule and the default state timeouts.  The unit count argument is
 * unused.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
	    IPL_SOFTNET, 0, "pfrule", NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
	    IPL_SOFTNET, 0, "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
	    IPL_SOFTNET, 0, "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
	    IPL_SOFTNET, 0, "pfstate", NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
	    IPL_SOFTNET, 0, "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
	    IPL_SOFTNET, 0, "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
	    IPL_SOFTNET, 0, "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
	    IPL_SOFTNET, 0, "pfqueue", NULL);
	pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
	    IPL_SOFTNET, 0, "pftag", NULL);
	pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
	    IPL_SOFTNET, 0, "pfpktdelay", NULL);

	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();
	pf_syncookies_init();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* on machines with <= 100MB of RAM, use the smaller table limit */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* two queue heads are swapped between active/inactive on commit */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
	pf_default_rule.nat.addr.type = PF_ADDR_NONE;
	pf_default_rule.route.addr.type = PF_ADDR_NONE;

	pf_normalize_init();
	memset(&pf_status, 0, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}

/*
 * /dev/pf open: only minor 0 exists.
 */
int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

/*
 * /dev/pf close: only minor 0 exists.
 */
int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

/*
 * Unlink a rule from rulequeue (if non-NULL) and release all resources
 * it references.  The actual free is deferred while states or source
 * nodes still point at the rule (states_cur/src_nodes > 0); in that case
 * only the unlink happens here and the rest runs when the last reference
 * goes away (caller passes rulequeue == NULL then).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* still referenced (or still linked): defer the teardown */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	if (rulequeue == NULL) {
		/* deferred case: tables were not detached at unlink time */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_remove_anchor(rule);
	pool_put(&pf_rule_pl, rule);
}

/*
 * Remove a single rule from its (active) ruleset: unlink it, renumber
 * the remaining rules, bump the ruleset ticket and recompute skip
 * steps.  The ruleset itself is garbage collected if it became empty.
 */
void
pf_purge_rule(struct pf_rule *rule)
{
	u_int32_t		 nr = 0;
	struct pf_ruleset	*ruleset;

	KASSERT((rule != NULL) && (rule->ruleset != NULL));
	ruleset = rule->ruleset;

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);
	pf_remove_if_empty_ruleset(ruleset);
}

/*
 * Look up (and reference) the numeric id for tagname on the given list.
 * If it does not exist and create is set, allocate the smallest free id
 * in [1, TAGID_MAX]; the list is kept sorted by id so the first gap can
 * be found with a linear scan.  Returns 0 on failure (not found, id
 * space exhausted, or pool allocation failure).
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	TAILQ_FOREACH(p, head, entries) {
		if (p->tag != new_tagid)
			break;
		new_tagid = p->tag + 1;
	}

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/*
 * Reverse lookup: copy the name for tagid into p (PF_TAG_NAME_SIZE
 * bytes).  p is left untouched when the id is unknown.
 */
void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/*
 * Drop one reference on tag; the entry is freed when the last
 * reference is released.  tag == 0 means "no tag" and is ignored.
 */
void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	TAILQ_FOREACH_SAFE(p, head, entries, next) {
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				pool_put(&pf_tag_pl, p);
			}
			break;
		}
	}
}

/* convenience wrappers operating on the global packet-tag list */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

/* take an additional reference on an already-allocated tag id */
void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

/*
 * Resolve a route-label name to its id; returns -1 if the label
 * cannot be resolved, 0 otherwise (including the non-rtlabel case).
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
}

/*
 * Fill in the textual route-label name for userland; "?" when the id
 * can no longer be resolved.
 */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
}

/* queue ids share the tag allocator, on their own list */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}

void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}

void
pf_qid_unref(u_int16_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

/*
 * Open a rules transaction on anchor: flush the inactive ruleset,
 * hand back a fresh ticket and mark the inactive side open.
 */
int
pf_begin_rules(u_int32_t *ticket, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	*ticket = ++rs->rules.inactive.ticket;
	rs->rules.inactive.open = 1;
	return (0);
}

/*
 * Abort a rules transaction: flush the inactive ruleset and, for the
 * main ruleset, the inactive queue definitions.  A stale ticket is
 * silently ignored (returns 0).
 */
int
pf_rollback_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    rs->rules.inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	rs->rules.inactive.open = 0;

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);

	pf_free_queues(pf_queues_inactive);

	return (0);
}

/*
 * Release every queuespec on the given list (drops the interface
 * reference and returns the spec to its pool).
 */
void
pf_free_queues(struct pf_queuehead *where)
{
	struct pf_queuespec	*q, *qtmp;

	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
		TAILQ_REMOVE(where, q, entries);
		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
		pool_put(&pf_queue_pl, q);
	}
}

/*
 * Detach PF queueing from all interfaces that carry a root queue,
 * restoring the default priq qdisc on their send queues.
 */
void
pf_remove_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;

	/* put back interfaces in normal queueing mode */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}
}

/*
 * Per-interface scratch record used while building the new queue
 * configuration in pf_create_queues(); singly linked via "next".
 */
struct pf_queue_if {
	struct ifnet		*ifp;
	const struct ifq_ops	*ifqops;
	const struct pfq_ops	*pfqops;
	void			*disc;
	struct pf_queue_if	*next;
};

/* find the scratch record for ifp, or NULL if not on the list */
static inline struct pf_queue_if *
pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
{
	struct pf_queue_if *qif = list;

	while (qif != NULL) {
		if (qif->ifp == ifp)
			return (qif);

		qif = qif->next;
	}

	return (qif);
}

/*
 * Build and attach the traffic-conditioner state for all active queue
 * definitions.  On error the partially built discipline state is freed
 * and the interfaces are left untouched.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_queue_if	*list = NULL, *qif;
	int			 error;

	/*
	 * Find root queues and allocate traffic conditioner
	 * private data for these interfaces
	 */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
		qif->ifp = ifp;

		/* root class selects the discipline: HFSC or FQ-CoDel */
		if (q->flags & PFQS_ROOTCLASS) {
			qif->ifqops = ifq_hfsc_ops;
			qif->pfqops = pfq_hfsc_ops;
		} else {
			qif->ifqops = ifq_fqcodel_ops;
			qif->pfqops = pfq_fqcodel_ops;
		}

		qif->disc = qif->pfqops->pfq_alloc(ifp);

		qif->next = list;
		list = qif;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = pf_ifp2q(list, ifp);
		KASSERT(qif != NULL);
		error = qif->pfqops->pfq_addqueue(qif->disc, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = pf_ifp2q(list, ifp);
		if (qif != NULL)
			continue;

		/* interface lost its queues: back to default priq */
		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		ifp = qif->ifp;

		/* ifq_attach takes over ownership of the discipline */
		ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (0);

error:
	/* unwind: free every discipline allocated so far */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		qif->pfqops->pfq_free(qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (error);
}

/*
 * Swap the active and inactive queue lists, then build the new
 * configuration; on failure swap back so the old config stays active.
 */
int
pf_commit_queues(void)
{
	struct pf_queuehead	*qswap;
	int error;

	/* swap */
	qswap = pf_queues_active;
	pf_queues_active = pf_queues_inactive;
	pf_queues_inactive = qswap;

	error = pf_create_queues();
	if (error != 0) {
		pf_queues_inactive = pf_queues_active;
		pf_queues_active = qswap;
		return (error);
	}

	pf_free_queues(pf_queues_inactive);

	return (0);
}

/*
 * Select the queue manager ops for a queuespec; only flow queues have
 * one, everything else is handled by the interface discipline itself.
 */
const struct pfq_ops *
pf_queue_manager(struct pf_queuespec *q)
{
	if (q->flags & PFQS_FLOWQUEUE)
		return pfq_fqcodel_ops;
	return (/* pfq_default_ops */ NULL);
}

/*
 * Helpers for pf_hash_rule(): fold rule fields into the MD5 context
 * "ctx" (in scope at the call site).  Multi-byte integers are hashed
 * in network byte order so the checksum matches across hosts.
 */
#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

/*
 * Hash the address portion of a rule (type-dependent) plus its ports
 * and operators into ctx.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

/*
 * Fold the match-relevant fields of one rule into ctx; used to compute
 * the ruleset checksum for pfsync rule matching.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup?
 */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

/*
 * Commit a rules transaction opened by pf_begin_rules(): validate the
 * ticket, compute the pfsync checksum (main ruleset only), swap the
 * inactive ruleset in as the active one and tear down the old rules.
 * For the main ruleset the staged queue definitions are committed too.
 * Returns EBUSY on a stale ticket.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	/* Make sure any expired rules get removed from active rules first. */
	pf_purge_expired_rules();

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;
	old_array = rs->rules.active.ptr_array;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.ptr_array = old_array;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}

/*
 * Compute the MD5 checksum over the inactive ruleset and store it in
 * pf_status.pf_chksum; also (re)build the by-number rule pointer array
 * used for pfsync rule matching.  Returns ENOMEM if the array cannot
 * be allocated.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;

	if (rs->rules.inactive.rcount) {
		rs->rules.inactive.ptr_array =
		    mallocarray(rs->rules.inactive.rcount, sizeof(caddr_t),
		    M_TEMP, M_NOWAIT);

		if (!rs->rules.inactive.ptr_array)
			return (ENOMEM);

		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
			pf_hash_rule(&ctx, rule);
			/* index by rule number for O(1) pfsync lookups */
			(rs->rules.inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}

/*
 * Resolve the dynamic-interface, table and route-label parts of a rule
 * address; returns EINVAL if any of them cannot be set up.
 */
int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	if (pfi_dynaddr_setup(addr, af) ||
	    pf_tbladdr_setup(ruleset, addr) ||
	    pf_rtlabel_add(addr))
		return (EINVAL);

	return (0);
}

/*
 * Resolve an interface name to a referenced pfi_kif; an empty name
 * yields *kif = NULL.  Returns EINVAL when the interface is unknown.
 */
int
pf_kif_setup(char *ifname, struct pfi_kif **kif)
{
	if (ifname[0]) {
		*kif = pfi_kif_get(ifname);
		if (*kif == NULL)
			return (EINVAL);

		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
	} else
		*kif = NULL;

	return (0);
}

/*
 * Fill in the userland-visible representation of a rule address
 * (dynamic interface, table and route-label parts).
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}

/*
 * Main /dev/pf ioctl entry point.  Before dispatching, two permission
 * gates are applied: at securelevel > 1 only read-style (and dummy)
 * commands are allowed (EPERM otherwise), and on a descriptor opened
 * without FWRITE only non-modifying commands are allowed (EACCES
 * otherwise).
 */
int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel > 1)
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCGETQUEUES:
		case DIOCGETQUEUE:
		case DIOCGETQSTATS:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
		case DIOCGETSYNFLWATS:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case
DIOCGETTIMEOUT: 986 case DIOCGETLIMIT: 987 case DIOCGETRULESETS: 988 case DIOCGETRULESET: 989 case DIOCGETQUEUES: 990 case DIOCGETQUEUE: 991 case DIOCGETQSTATS: 992 case DIOCNATLOOK: 993 case DIOCRGETTABLES: 994 case DIOCRGETTSTATS: 995 case DIOCRGETADDRS: 996 case DIOCRGETASTATS: 997 case DIOCRTSTADDRS: 998 case DIOCOSFPGET: 999 case DIOCGETSRCNODES: 1000 case DIOCIGETIFACES: 1001 case DIOCGETSYNFLWATS: 1002 break; 1003 case DIOCRCLRTABLES: 1004 case DIOCRADDTABLES: 1005 case DIOCRDELTABLES: 1006 case DIOCRCLRTSTATS: 1007 case DIOCRCLRADDRS: 1008 case DIOCRADDADDRS: 1009 case DIOCRDELADDRS: 1010 case DIOCRSETADDRS: 1011 case DIOCRSETTFLAGS: 1012 if (((struct pfioc_table *)addr)->pfrio_flags & 1013 PFR_FLAG_DUMMY) { 1014 flags |= FWRITE; /* need write lock for dummy */ 1015 break; /* dummy operation ok */ 1016 } 1017 return (EACCES); 1018 case DIOCGETRULE: 1019 if (((struct pfioc_rule *)addr)->action == 1020 PF_GET_CLR_CNTR) 1021 return (EACCES); 1022 break; 1023 default: 1024 return (EACCES); 1025 } 1026 1027 NET_LOCK(); 1028 switch (cmd) { 1029 1030 case DIOCSTART: 1031 PF_LOCK(); 1032 if (pf_status.running) 1033 error = EEXIST; 1034 else { 1035 pf_status.running = 1; 1036 pf_status.since = time_uptime; 1037 if (pf_status.stateid == 0) { 1038 pf_status.stateid = time_second; 1039 pf_status.stateid = pf_status.stateid << 32; 1040 } 1041 timeout_add_sec(&pf_purge_to, 1); 1042 pf_create_queues(); 1043 DPFPRINTF(LOG_NOTICE, "pf: started"); 1044 } 1045 PF_UNLOCK(); 1046 break; 1047 1048 case DIOCSTOP: 1049 PF_LOCK(); 1050 if (!pf_status.running) 1051 error = ENOENT; 1052 else { 1053 pf_status.running = 0; 1054 pf_status.since = time_uptime; 1055 pf_remove_queues(); 1056 DPFPRINTF(LOG_NOTICE, "pf: stopped"); 1057 } 1058 PF_UNLOCK(); 1059 break; 1060 1061 case DIOCGETQUEUES: { 1062 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1063 struct pf_queuespec *qs; 1064 u_int32_t nr = 0; 1065 1066 PF_LOCK(); 1067 pq->ticket = pf_main_ruleset.rules.active.ticket; 1068 1069 
/* save state to not run over them all each time? */ 1070 qs = TAILQ_FIRST(pf_queues_active); 1071 while (qs != NULL) { 1072 qs = TAILQ_NEXT(qs, entries); 1073 nr++; 1074 } 1075 pq->nr = nr; 1076 PF_UNLOCK(); 1077 break; 1078 } 1079 1080 case DIOCGETQUEUE: { 1081 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1082 struct pf_queuespec *qs; 1083 u_int32_t nr = 0; 1084 1085 PF_LOCK(); 1086 if (pq->ticket != pf_main_ruleset.rules.active.ticket) { 1087 error = EBUSY; 1088 PF_UNLOCK(); 1089 break; 1090 } 1091 1092 /* save state to not run over them all each time? */ 1093 qs = TAILQ_FIRST(pf_queues_active); 1094 while ((qs != NULL) && (nr++ < pq->nr)) 1095 qs = TAILQ_NEXT(qs, entries); 1096 if (qs == NULL) { 1097 error = EBUSY; 1098 PF_UNLOCK(); 1099 break; 1100 } 1101 memcpy(&pq->queue, qs, sizeof(pq->queue)); 1102 PF_UNLOCK(); 1103 break; 1104 } 1105 1106 case DIOCGETQSTATS: { 1107 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 1108 struct pf_queuespec *qs; 1109 u_int32_t nr; 1110 int nbytes; 1111 1112 PF_LOCK(); 1113 if (pq->ticket != pf_main_ruleset.rules.active.ticket) { 1114 error = EBUSY; 1115 PF_UNLOCK(); 1116 break; 1117 } 1118 nbytes = pq->nbytes; 1119 nr = 0; 1120 1121 /* save state to not run over them all each time? 
*/ 1122 qs = TAILQ_FIRST(pf_queues_active); 1123 while ((qs != NULL) && (nr++ < pq->nr)) 1124 qs = TAILQ_NEXT(qs, entries); 1125 if (qs == NULL) { 1126 error = EBUSY; 1127 PF_UNLOCK(); 1128 break; 1129 } 1130 memcpy(&pq->queue, qs, sizeof(pq->queue)); 1131 /* It's a root flow queue but is not an HFSC root class */ 1132 if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 && 1133 !(qs->flags & PFQS_ROOTCLASS)) 1134 error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf, 1135 &nbytes); 1136 else 1137 error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf, 1138 &nbytes); 1139 if (error == 0) 1140 pq->nbytes = nbytes; 1141 PF_UNLOCK(); 1142 break; 1143 } 1144 1145 case DIOCADDQUEUE: { 1146 struct pfioc_queue *q = (struct pfioc_queue *)addr; 1147 struct pf_queuespec *qs; 1148 1149 PF_LOCK(); 1150 if (q->ticket != pf_main_ruleset.rules.inactive.ticket) { 1151 error = EBUSY; 1152 PF_UNLOCK(); 1153 break; 1154 } 1155 qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1156 if (qs == NULL) { 1157 error = ENOMEM; 1158 PF_UNLOCK(); 1159 break; 1160 } 1161 memcpy(qs, &q->queue, sizeof(*qs)); 1162 qs->qid = pf_qname2qid(qs->qname, 1); 1163 if (qs->qid == 0) { 1164 pool_put(&pf_queue_pl, qs); 1165 error = EBUSY; 1166 PF_UNLOCK(); 1167 break; 1168 } 1169 if (qs->parent[0] && (qs->parent_qid = 1170 pf_qname2qid(qs->parent, 0)) == 0) { 1171 pool_put(&pf_queue_pl, qs); 1172 error = ESRCH; 1173 PF_UNLOCK(); 1174 break; 1175 } 1176 qs->kif = pfi_kif_get(qs->ifname); 1177 if (qs->kif == NULL) { 1178 pool_put(&pf_queue_pl, qs); 1179 error = ESRCH; 1180 PF_UNLOCK(); 1181 break; 1182 } 1183 /* XXX resolve bw percentage specs */ 1184 pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE); 1185 1186 TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries); 1187 PF_UNLOCK(); 1188 1189 break; 1190 } 1191 1192 case DIOCADDRULE: { 1193 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1194 struct pf_ruleset *ruleset; 1195 struct pf_rule *rule, *tail; 1196 1197 PF_LOCK(); 1198 pr->anchor[sizeof(pr->anchor) - 1] = '\0'; 
1199 ruleset = pf_find_ruleset(pr->anchor); 1200 if (ruleset == NULL) { 1201 error = EINVAL; 1202 PF_UNLOCK(); 1203 break; 1204 } 1205 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1206 error = EINVAL; 1207 PF_UNLOCK(); 1208 break; 1209 } 1210 if (pr->ticket != ruleset->rules.inactive.ticket) { 1211 error = EBUSY; 1212 PF_UNLOCK(); 1213 break; 1214 } 1215 rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1216 if (rule == NULL) { 1217 error = ENOMEM; 1218 PF_UNLOCK(); 1219 break; 1220 } 1221 if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) { 1222 pf_rm_rule(NULL, rule); 1223 rule = NULL; 1224 PF_UNLOCK(); 1225 break; 1226 } 1227 rule->cuid = p->p_ucred->cr_ruid; 1228 rule->cpid = p->p_p->ps_pid; 1229 1230 switch (rule->af) { 1231 case 0: 1232 break; 1233 case AF_INET: 1234 break; 1235 #ifdef INET6 1236 case AF_INET6: 1237 break; 1238 #endif /* INET6 */ 1239 default: 1240 pf_rm_rule(NULL, rule); 1241 rule = NULL; 1242 error = EAFNOSUPPORT; 1243 PF_UNLOCK(); 1244 goto fail; 1245 } 1246 tail = TAILQ_LAST(ruleset->rules.inactive.ptr, 1247 pf_rulequeue); 1248 if (tail) 1249 rule->nr = tail->nr + 1; 1250 else 1251 rule->nr = 0; 1252 1253 if (rule->src.addr.type == PF_ADDR_NONE || 1254 rule->dst.addr.type == PF_ADDR_NONE) 1255 error = EINVAL; 1256 1257 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 1258 error = EINVAL; 1259 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 1260 error = EINVAL; 1261 if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af)) 1262 error = EINVAL; 1263 if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af)) 1264 error = EINVAL; 1265 if (pf_addr_setup(ruleset, &rule->route.addr, rule->af)) 1266 error = EINVAL; 1267 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1268 error = EINVAL; 1269 if (rule->rt && !rule->direction) 1270 error = EINVAL; 1271 if (rule->scrub_flags & PFSTATE_SETPRIO && 1272 (rule->set_prio[0] > IFQ_MAXPRIO || 1273 rule->set_prio[1] > IFQ_MAXPRIO)) 1274 error = EINVAL; 1275 1276 if (error) { 
1277 pf_rm_rule(NULL, rule); 1278 PF_UNLOCK(); 1279 break; 1280 } 1281 TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr, 1282 rule, entries); 1283 rule->ruleset = ruleset; 1284 ruleset->rules.inactive.rcount++; 1285 PF_UNLOCK(); 1286 break; 1287 } 1288 1289 case DIOCGETRULES: { 1290 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1291 struct pf_ruleset *ruleset; 1292 struct pf_rule *tail; 1293 1294 PF_LOCK(); 1295 pr->anchor[sizeof(pr->anchor) - 1] = '\0'; 1296 ruleset = pf_find_ruleset(pr->anchor); 1297 if (ruleset == NULL) { 1298 error = EINVAL; 1299 PF_UNLOCK(); 1300 break; 1301 } 1302 tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue); 1303 if (tail) 1304 pr->nr = tail->nr + 1; 1305 else 1306 pr->nr = 0; 1307 pr->ticket = ruleset->rules.active.ticket; 1308 PF_UNLOCK(); 1309 break; 1310 } 1311 1312 case DIOCGETRULE: { 1313 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1314 struct pf_ruleset *ruleset; 1315 struct pf_rule *rule; 1316 int i; 1317 1318 PF_LOCK(); 1319 pr->anchor[sizeof(pr->anchor) - 1] = '\0'; 1320 ruleset = pf_find_ruleset(pr->anchor); 1321 if (ruleset == NULL) { 1322 error = EINVAL; 1323 PF_UNLOCK(); 1324 break; 1325 } 1326 if (pr->ticket != ruleset->rules.active.ticket) { 1327 error = EBUSY; 1328 PF_UNLOCK(); 1329 break; 1330 } 1331 rule = TAILQ_FIRST(ruleset->rules.active.ptr); 1332 while ((rule != NULL) && (rule->nr != pr->nr)) 1333 rule = TAILQ_NEXT(rule, entries); 1334 if (rule == NULL) { 1335 error = EBUSY; 1336 PF_UNLOCK(); 1337 break; 1338 } 1339 memcpy(&pr->rule, rule, sizeof(struct pf_rule)); 1340 memset(&pr->rule.entries, 0, sizeof(pr->rule.entries)); 1341 pr->rule.kif = NULL; 1342 pr->rule.nat.kif = NULL; 1343 pr->rule.rdr.kif = NULL; 1344 pr->rule.route.kif = NULL; 1345 pr->rule.rcv_kif = NULL; 1346 pr->rule.anchor = NULL; 1347 pr->rule.overload_tbl = NULL; 1348 pr->rule.pktrate.limit /= PF_THRESHOLD_MULT; 1349 memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle)); 1350 pr->rule.ruleset = NULL; 1351 if 
(pf_anchor_copyout(ruleset, rule, pr)) { 1352 error = EBUSY; 1353 PF_UNLOCK(); 1354 break; 1355 } 1356 pf_addr_copyout(&pr->rule.src.addr); 1357 pf_addr_copyout(&pr->rule.dst.addr); 1358 pf_addr_copyout(&pr->rule.rdr.addr); 1359 pf_addr_copyout(&pr->rule.nat.addr); 1360 pf_addr_copyout(&pr->rule.route.addr); 1361 for (i = 0; i < PF_SKIP_COUNT; ++i) 1362 if (rule->skip[i].ptr == NULL) 1363 pr->rule.skip[i].nr = (u_int32_t)-1; 1364 else 1365 pr->rule.skip[i].nr = 1366 rule->skip[i].ptr->nr; 1367 1368 if (pr->action == PF_GET_CLR_CNTR) { 1369 rule->evaluations = 0; 1370 rule->packets[0] = rule->packets[1] = 0; 1371 rule->bytes[0] = rule->bytes[1] = 0; 1372 rule->states_tot = 0; 1373 } 1374 PF_UNLOCK(); 1375 break; 1376 } 1377 1378 case DIOCCHANGERULE: { 1379 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1380 struct pf_ruleset *ruleset; 1381 struct pf_rule *oldrule = NULL, *newrule = NULL; 1382 u_int32_t nr = 0; 1383 1384 if (pcr->action < PF_CHANGE_ADD_HEAD || 1385 pcr->action > PF_CHANGE_GET_TICKET) { 1386 error = EINVAL; 1387 break; 1388 } 1389 PF_LOCK(); 1390 ruleset = pf_find_ruleset(pcr->anchor); 1391 if (ruleset == NULL) { 1392 error = EINVAL; 1393 PF_UNLOCK(); 1394 break; 1395 } 1396 1397 if (pcr->action == PF_CHANGE_GET_TICKET) { 1398 pcr->ticket = ++ruleset->rules.active.ticket; 1399 PF_UNLOCK(); 1400 break; 1401 } else { 1402 if (pcr->ticket != 1403 ruleset->rules.active.ticket) { 1404 error = EINVAL; 1405 PF_UNLOCK(); 1406 break; 1407 } 1408 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1409 error = EINVAL; 1410 PF_UNLOCK(); 1411 break; 1412 } 1413 } 1414 1415 if (pcr->action != PF_CHANGE_REMOVE) { 1416 newrule = pool_get(&pf_rule_pl, 1417 PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1418 if (newrule == NULL) { 1419 error = ENOMEM; 1420 PF_UNLOCK(); 1421 break; 1422 } 1423 pf_rule_copyin(&pcr->rule, newrule, ruleset); 1424 newrule->cuid = p->p_ucred->cr_ruid; 1425 newrule->cpid = p->p_p->ps_pid; 1426 1427 switch (newrule->af) { 1428 case 0: 1429 break; 1430 
case AF_INET: 1431 break; 1432 #ifdef INET6 1433 case AF_INET6: 1434 break; 1435 #endif /* INET6 */ 1436 default: 1437 pf_rm_rule(NULL, newrule); 1438 error = EAFNOSUPPORT; 1439 PF_UNLOCK(); 1440 goto fail; 1441 } 1442 1443 if (newrule->rt && !newrule->direction) 1444 error = EINVAL; 1445 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 1446 error = EINVAL; 1447 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 1448 error = EINVAL; 1449 if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af)) 1450 error = EINVAL; 1451 if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af)) 1452 error = EINVAL; 1453 if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af)) 1454 error = EINVAL; 1455 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1456 error = EINVAL; 1457 1458 if (error) { 1459 pf_rm_rule(NULL, newrule); 1460 PF_UNLOCK(); 1461 break; 1462 } 1463 } 1464 1465 if (pcr->action == PF_CHANGE_ADD_HEAD) 1466 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1467 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1468 oldrule = TAILQ_LAST(ruleset->rules.active.ptr, 1469 pf_rulequeue); 1470 else { 1471 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1472 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1473 oldrule = TAILQ_NEXT(oldrule, entries); 1474 if (oldrule == NULL) { 1475 if (newrule != NULL) 1476 pf_rm_rule(NULL, newrule); 1477 error = EINVAL; 1478 PF_UNLOCK(); 1479 break; 1480 } 1481 } 1482 1483 if (pcr->action == PF_CHANGE_REMOVE) { 1484 pf_rm_rule(ruleset->rules.active.ptr, oldrule); 1485 ruleset->rules.active.rcount--; 1486 } else { 1487 if (oldrule == NULL) 1488 TAILQ_INSERT_TAIL( 1489 ruleset->rules.active.ptr, 1490 newrule, entries); 1491 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1492 pcr->action == PF_CHANGE_ADD_BEFORE) 1493 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1494 else 1495 TAILQ_INSERT_AFTER( 1496 ruleset->rules.active.ptr, 1497 oldrule, newrule, entries); 1498 ruleset->rules.active.rcount++; 1499 
} 1500 1501 nr = 0; 1502 TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries) 1503 oldrule->nr = nr++; 1504 1505 ruleset->rules.active.ticket++; 1506 1507 pf_calc_skip_steps(ruleset->rules.active.ptr); 1508 pf_remove_if_empty_ruleset(ruleset); 1509 1510 PF_UNLOCK(); 1511 break; 1512 } 1513 1514 case DIOCCLRSTATES: { 1515 struct pf_state *s, *nexts; 1516 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1517 u_int killed = 0; 1518 1519 PF_LOCK(); 1520 PF_STATE_ENTER_WRITE(); 1521 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { 1522 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1523 1524 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1525 s->kif->pfik_name)) { 1526 #if NPFSYNC > 0 1527 /* don't send out individual delete messages */ 1528 SET(s->state_flags, PFSTATE_NOSYNC); 1529 #endif /* NPFSYNC > 0 */ 1530 pf_remove_state(s); 1531 killed++; 1532 } 1533 } 1534 PF_STATE_EXIT_WRITE(); 1535 psk->psk_killed = killed; 1536 #if NPFSYNC > 0 1537 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1538 #endif /* NPFSYNC > 0 */ 1539 PF_UNLOCK(); 1540 break; 1541 } 1542 1543 case DIOCKILLSTATES: { 1544 struct pf_state *s, *nexts; 1545 struct pf_state_item *si, *sit; 1546 struct pf_state_key *sk, key; 1547 struct pf_addr *srcaddr, *dstaddr; 1548 u_int16_t srcport, dstport; 1549 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1550 u_int i, killed = 0; 1551 const int dirs[] = { PF_IN, PF_OUT }; 1552 int sidx, didx; 1553 1554 if (psk->psk_pfcmp.id) { 1555 if (psk->psk_pfcmp.creatorid == 0) 1556 psk->psk_pfcmp.creatorid = pf_status.hostid; 1557 PF_LOCK(); 1558 PF_STATE_ENTER_WRITE(); 1559 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) { 1560 pf_remove_state(s); 1561 psk->psk_killed = 1; 1562 } 1563 PF_STATE_EXIT_WRITE(); 1564 PF_UNLOCK(); 1565 break; 1566 } 1567 1568 if (psk->psk_af && psk->psk_proto && 1569 psk->psk_src.port_op == PF_OP_EQ && 1570 psk->psk_dst.port_op == PF_OP_EQ) { 1571 1572 key.af = psk->psk_af; 1573 
key.proto = psk->psk_proto; 1574 key.rdomain = psk->psk_rdomain; 1575 1576 PF_LOCK(); 1577 PF_STATE_ENTER_WRITE(); 1578 for (i = 0; i < nitems(dirs); i++) { 1579 if (dirs[i] == PF_IN) { 1580 sidx = 0; 1581 didx = 1; 1582 } else { 1583 sidx = 1; 1584 didx = 0; 1585 } 1586 pf_addrcpy(&key.addr[sidx], 1587 &psk->psk_src.addr.v.a.addr, key.af); 1588 pf_addrcpy(&key.addr[didx], 1589 &psk->psk_dst.addr.v.a.addr, key.af); 1590 key.port[sidx] = psk->psk_src.port[0]; 1591 key.port[didx] = psk->psk_dst.port[0]; 1592 1593 sk = RB_FIND(pf_state_tree, &pf_statetbl, &key); 1594 if (sk == NULL) 1595 continue; 1596 1597 TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit) 1598 if (((si->s->key[PF_SK_WIRE]->af == 1599 si->s->key[PF_SK_STACK]->af && 1600 sk == (dirs[i] == PF_IN ? 1601 si->s->key[PF_SK_WIRE] : 1602 si->s->key[PF_SK_STACK])) || 1603 (si->s->key[PF_SK_WIRE]->af != 1604 si->s->key[PF_SK_STACK]->af && 1605 dirs[i] == PF_IN && 1606 (sk == si->s->key[PF_SK_STACK] || 1607 sk == si->s->key[PF_SK_WIRE]))) && 1608 (!psk->psk_ifname[0] || 1609 (si->s->kif != pfi_all && 1610 !strcmp(psk->psk_ifname, 1611 si->s->kif->pfik_name)))) { 1612 pf_remove_state(si->s); 1613 killed++; 1614 } 1615 } 1616 if (killed) 1617 psk->psk_killed = killed; 1618 PF_STATE_EXIT_WRITE(); 1619 PF_UNLOCK(); 1620 break; 1621 } 1622 1623 PF_LOCK(); 1624 PF_STATE_ENTER_WRITE(); 1625 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; 1626 s = nexts) { 1627 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1628 1629 if (s->direction == PF_OUT) { 1630 sk = s->key[PF_SK_STACK]; 1631 srcaddr = &sk->addr[1]; 1632 dstaddr = &sk->addr[0]; 1633 srcport = sk->port[1]; 1634 dstport = sk->port[0]; 1635 } else { 1636 sk = s->key[PF_SK_WIRE]; 1637 srcaddr = &sk->addr[0]; 1638 dstaddr = &sk->addr[1]; 1639 srcport = sk->port[0]; 1640 dstport = sk->port[1]; 1641 } 1642 if ((!psk->psk_af || sk->af == psk->psk_af) 1643 && (!psk->psk_proto || psk->psk_proto == 1644 sk->proto) && psk->psk_rdomain == sk->rdomain && 1645 
pf_match_addr(psk->psk_src.neg, 1646 &psk->psk_src.addr.v.a.addr, 1647 &psk->psk_src.addr.v.a.mask, 1648 srcaddr, sk->af) && 1649 pf_match_addr(psk->psk_dst.neg, 1650 &psk->psk_dst.addr.v.a.addr, 1651 &psk->psk_dst.addr.v.a.mask, 1652 dstaddr, sk->af) && 1653 (psk->psk_src.port_op == 0 || 1654 pf_match_port(psk->psk_src.port_op, 1655 psk->psk_src.port[0], psk->psk_src.port[1], 1656 srcport)) && 1657 (psk->psk_dst.port_op == 0 || 1658 pf_match_port(psk->psk_dst.port_op, 1659 psk->psk_dst.port[0], psk->psk_dst.port[1], 1660 dstport)) && 1661 (!psk->psk_label[0] || (s->rule.ptr->label[0] && 1662 !strcmp(psk->psk_label, s->rule.ptr->label))) && 1663 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1664 s->kif->pfik_name))) { 1665 pf_remove_state(s); 1666 killed++; 1667 } 1668 } 1669 psk->psk_killed = killed; 1670 PF_STATE_EXIT_WRITE(); 1671 PF_UNLOCK(); 1672 break; 1673 } 1674 1675 #if NPFSYNC > 0 1676 case DIOCADDSTATE: { 1677 struct pfioc_state *ps = (struct pfioc_state *)addr; 1678 struct pfsync_state *sp = &ps->state; 1679 1680 if (sp->timeout >= PFTM_MAX) { 1681 error = EINVAL; 1682 break; 1683 } 1684 PF_LOCK(); 1685 PF_STATE_ENTER_WRITE(); 1686 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL); 1687 PF_STATE_EXIT_WRITE(); 1688 PF_UNLOCK(); 1689 break; 1690 } 1691 #endif /* NPFSYNC > 0 */ 1692 1693 case DIOCGETSTATE: { 1694 struct pfioc_state *ps = (struct pfioc_state *)addr; 1695 struct pf_state *s; 1696 struct pf_state_cmp id_key; 1697 1698 memset(&id_key, 0, sizeof(id_key)); 1699 id_key.id = ps->state.id; 1700 id_key.creatorid = ps->state.creatorid; 1701 1702 PF_STATE_ENTER_READ(); 1703 s = pf_find_state_byid(&id_key); 1704 s = pf_state_ref(s); 1705 PF_STATE_EXIT_READ(); 1706 if (s == NULL) { 1707 error = ENOENT; 1708 break; 1709 } 1710 1711 pf_state_export(&ps->state, s); 1712 pf_state_unref(s); 1713 break; 1714 } 1715 1716 case DIOCGETSTATES: { 1717 struct pfioc_states *ps = (struct pfioc_states *)addr; 1718 struct pf_state *state; 1719 struct pfsync_state *p, 
*pstore; 1720 u_int32_t nr = 0; 1721 1722 if (ps->ps_len == 0) { 1723 nr = pf_status.states; 1724 ps->ps_len = sizeof(struct pfsync_state) * nr; 1725 break; 1726 } 1727 1728 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 1729 1730 p = ps->ps_states; 1731 1732 PF_STATE_ENTER_READ(); 1733 state = TAILQ_FIRST(&state_list); 1734 while (state) { 1735 if (state->timeout != PFTM_UNLINKED) { 1736 if ((nr+1) * sizeof(*p) > ps->ps_len) 1737 break; 1738 pf_state_export(pstore, state); 1739 error = copyout(pstore, p, sizeof(*p)); 1740 if (error) { 1741 free(pstore, M_TEMP, sizeof(*pstore)); 1742 PF_STATE_EXIT_READ(); 1743 goto fail; 1744 } 1745 p++; 1746 nr++; 1747 } 1748 state = TAILQ_NEXT(state, entry_list); 1749 } 1750 PF_STATE_EXIT_READ(); 1751 1752 ps->ps_len = sizeof(struct pfsync_state) * nr; 1753 1754 free(pstore, M_TEMP, sizeof(*pstore)); 1755 break; 1756 } 1757 1758 case DIOCGETSTATUS: { 1759 struct pf_status *s = (struct pf_status *)addr; 1760 PF_LOCK(); 1761 memcpy(s, &pf_status, sizeof(struct pf_status)); 1762 pfi_update_status(s->ifname, s); 1763 PF_UNLOCK(); 1764 break; 1765 } 1766 1767 case DIOCSETSTATUSIF: { 1768 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1769 1770 PF_LOCK(); 1771 if (pi->pfiio_name[0] == 0) { 1772 memset(pf_status.ifname, 0, IFNAMSIZ); 1773 PF_UNLOCK(); 1774 break; 1775 } 1776 strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ); 1777 pf_trans_set.mask |= PF_TSET_STATUSIF; 1778 PF_UNLOCK(); 1779 break; 1780 } 1781 1782 case DIOCCLRSTATUS: { 1783 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1784 1785 PF_LOCK(); 1786 /* if ifname is specified, clear counters there only */ 1787 if (pi->pfiio_name[0]) { 1788 pfi_update_status(pi->pfiio_name, NULL); 1789 PF_UNLOCK(); 1790 break; 1791 } 1792 1793 memset(pf_status.counters, 0, sizeof(pf_status.counters)); 1794 memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters)); 1795 memset(pf_status.scounters, 0, sizeof(pf_status.scounters)); 1796 pf_status.since = time_uptime; 
1797 1798 PF_UNLOCK(); 1799 break; 1800 } 1801 1802 case DIOCNATLOOK: { 1803 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1804 struct pf_state_key *sk; 1805 struct pf_state *state; 1806 struct pf_state_key_cmp key; 1807 int m = 0, direction = pnl->direction; 1808 int sidx, didx; 1809 1810 switch (pnl->af) { 1811 case AF_INET: 1812 break; 1813 #ifdef INET6 1814 case AF_INET6: 1815 break; 1816 #endif /* INET6 */ 1817 default: 1818 error = EAFNOSUPPORT; 1819 goto fail; 1820 } 1821 1822 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 1823 sidx = (direction == PF_IN) ? 1 : 0; 1824 didx = (direction == PF_IN) ? 0 : 1; 1825 1826 if (!pnl->proto || 1827 PF_AZERO(&pnl->saddr, pnl->af) || 1828 PF_AZERO(&pnl->daddr, pnl->af) || 1829 ((pnl->proto == IPPROTO_TCP || 1830 pnl->proto == IPPROTO_UDP) && 1831 (!pnl->dport || !pnl->sport)) || 1832 pnl->rdomain > RT_TABLEID_MAX) 1833 error = EINVAL; 1834 else { 1835 key.af = pnl->af; 1836 key.proto = pnl->proto; 1837 key.rdomain = pnl->rdomain; 1838 pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af); 1839 key.port[sidx] = pnl->sport; 1840 pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af); 1841 key.port[didx] = pnl->dport; 1842 1843 PF_STATE_ENTER_READ(); 1844 state = pf_find_state_all(&key, direction, &m); 1845 state = pf_state_ref(state); 1846 PF_STATE_EXIT_READ(); 1847 1848 if (m > 1) 1849 error = E2BIG; /* more than one state */ 1850 else if (state != NULL) { 1851 sk = state->key[sidx]; 1852 pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx], 1853 sk->af); 1854 pnl->rsport = sk->port[sidx]; 1855 pf_addrcpy(&pnl->rdaddr, &sk->addr[didx], 1856 sk->af); 1857 pnl->rdport = sk->port[didx]; 1858 pnl->rrdomain = sk->rdomain; 1859 } else 1860 error = ENOENT; 1861 pf_state_unref(state); 1862 } 1863 break; 1864 } 1865 1866 case DIOCSETTIMEOUT: { 1867 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1868 1869 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1870 pt->seconds < 0) { 1871 error = EINVAL; 1872 goto fail; 1873 } 1874 
PF_LOCK(); 1875 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 1876 pt->seconds = 1; 1877 pf_default_rule_new.timeout[pt->timeout] = pt->seconds; 1878 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1879 PF_UNLOCK(); 1880 break; 1881 } 1882 1883 case DIOCGETTIMEOUT: { 1884 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1885 1886 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1887 error = EINVAL; 1888 goto fail; 1889 } 1890 PF_LOCK(); 1891 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1892 PF_UNLOCK(); 1893 break; 1894 } 1895 1896 case DIOCGETLIMIT: { 1897 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1898 1899 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1900 error = EINVAL; 1901 goto fail; 1902 } 1903 PF_LOCK(); 1904 pl->limit = pf_pool_limits[pl->index].limit; 1905 PF_UNLOCK(); 1906 break; 1907 } 1908 1909 case DIOCSETLIMIT: { 1910 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1911 1912 PF_LOCK(); 1913 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1914 pf_pool_limits[pl->index].pp == NULL) { 1915 error = EINVAL; 1916 PF_UNLOCK(); 1917 goto fail; 1918 } 1919 if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout > 1920 pl->limit) { 1921 error = EBUSY; 1922 PF_UNLOCK(); 1923 goto fail; 1924 } 1925 /* Fragments reference mbuf clusters. 
*/ 1926 if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) { 1927 error = EINVAL; 1928 PF_UNLOCK(); 1929 goto fail; 1930 } 1931 1932 pf_pool_limits[pl->index].limit_new = pl->limit; 1933 pl->limit = pf_pool_limits[pl->index].limit; 1934 PF_UNLOCK(); 1935 break; 1936 } 1937 1938 case DIOCSETDEBUG: { 1939 u_int32_t *level = (u_int32_t *)addr; 1940 1941 PF_LOCK(); 1942 pf_trans_set.debug = *level; 1943 pf_trans_set.mask |= PF_TSET_DEBUG; 1944 PF_UNLOCK(); 1945 break; 1946 } 1947 1948 case DIOCGETRULESETS: { 1949 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 1950 struct pf_ruleset *ruleset; 1951 struct pf_anchor *anchor; 1952 1953 PF_LOCK(); 1954 pr->path[sizeof(pr->path) - 1] = '\0'; 1955 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 1956 error = EINVAL; 1957 PF_UNLOCK(); 1958 break; 1959 } 1960 pr->nr = 0; 1961 if (ruleset == &pf_main_ruleset) { 1962 /* XXX kludge for pf_main_ruleset */ 1963 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 1964 if (anchor->parent == NULL) 1965 pr->nr++; 1966 } else { 1967 RB_FOREACH(anchor, pf_anchor_node, 1968 &ruleset->anchor->children) 1969 pr->nr++; 1970 } 1971 PF_UNLOCK(); 1972 break; 1973 } 1974 1975 case DIOCGETRULESET: { 1976 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 1977 struct pf_ruleset *ruleset; 1978 struct pf_anchor *anchor; 1979 u_int32_t nr = 0; 1980 1981 PF_LOCK(); 1982 pr->path[sizeof(pr->path) - 1] = '\0'; 1983 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 1984 error = EINVAL; 1985 PF_UNLOCK(); 1986 break; 1987 } 1988 pr->name[0] = '\0'; 1989 if (ruleset == &pf_main_ruleset) { 1990 /* XXX kludge for pf_main_ruleset */ 1991 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 1992 if (anchor->parent == NULL && nr++ == pr->nr) { 1993 strlcpy(pr->name, anchor->name, 1994 sizeof(pr->name)); 1995 break; 1996 } 1997 } else { 1998 RB_FOREACH(anchor, pf_anchor_node, 1999 &ruleset->anchor->children) 2000 if (nr++ == pr->nr) { 2001 strlcpy(pr->name, anchor->name, 2002 
sizeof(pr->name)); 2003 break; 2004 } 2005 } 2006 PF_UNLOCK(); 2007 if (!pr->name[0]) 2008 error = EBUSY; 2009 break; 2010 } 2011 2012 case DIOCRCLRTABLES: { 2013 struct pfioc_table *io = (struct pfioc_table *)addr; 2014 2015 if (io->pfrio_esize != 0) { 2016 error = ENODEV; 2017 break; 2018 } 2019 PF_LOCK(); 2020 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2021 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2022 PF_UNLOCK(); 2023 break; 2024 } 2025 2026 case DIOCRADDTABLES: { 2027 struct pfioc_table *io = (struct pfioc_table *)addr; 2028 2029 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2030 error = ENODEV; 2031 break; 2032 } 2033 PF_LOCK(); 2034 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2035 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2036 PF_UNLOCK(); 2037 break; 2038 } 2039 2040 case DIOCRDELTABLES: { 2041 struct pfioc_table *io = (struct pfioc_table *)addr; 2042 2043 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2044 error = ENODEV; 2045 break; 2046 } 2047 PF_LOCK(); 2048 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2049 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2050 PF_UNLOCK(); 2051 break; 2052 } 2053 2054 case DIOCRGETTABLES: { 2055 struct pfioc_table *io = (struct pfioc_table *)addr; 2056 2057 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2058 error = ENODEV; 2059 break; 2060 } 2061 PF_LOCK(); 2062 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2063 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2064 PF_UNLOCK(); 2065 break; 2066 } 2067 2068 case DIOCRGETTSTATS: { 2069 struct pfioc_table *io = (struct pfioc_table *)addr; 2070 2071 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2072 error = ENODEV; 2073 break; 2074 } 2075 PF_LOCK(); 2076 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2077 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2078 PF_UNLOCK(); 2079 break; 2080 } 2081 2082 case DIOCRCLRTSTATS: { 2083 struct pfioc_table *io = 
(struct pfioc_table *)addr; 2084 2085 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2086 error = ENODEV; 2087 break; 2088 } 2089 PF_LOCK(); 2090 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2091 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2092 PF_UNLOCK(); 2093 break; 2094 } 2095 2096 case DIOCRSETTFLAGS: { 2097 struct pfioc_table *io = (struct pfioc_table *)addr; 2098 2099 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2100 error = ENODEV; 2101 break; 2102 } 2103 PF_LOCK(); 2104 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2105 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2106 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2107 PF_UNLOCK(); 2108 break; 2109 } 2110 2111 case DIOCRCLRADDRS: { 2112 struct pfioc_table *io = (struct pfioc_table *)addr; 2113 2114 if (io->pfrio_esize != 0) { 2115 error = ENODEV; 2116 break; 2117 } 2118 PF_LOCK(); 2119 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2120 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2121 PF_UNLOCK(); 2122 break; 2123 } 2124 2125 case DIOCRADDADDRS: { 2126 struct pfioc_table *io = (struct pfioc_table *)addr; 2127 2128 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2129 error = ENODEV; 2130 break; 2131 } 2132 PF_LOCK(); 2133 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2134 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2135 PFR_FLAG_USERIOCTL); 2136 PF_UNLOCK(); 2137 break; 2138 } 2139 2140 case DIOCRDELADDRS: { 2141 struct pfioc_table *io = (struct pfioc_table *)addr; 2142 2143 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2144 error = ENODEV; 2145 break; 2146 } 2147 PF_LOCK(); 2148 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2149 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2150 PFR_FLAG_USERIOCTL); 2151 PF_UNLOCK(); 2152 break; 2153 } 2154 2155 case DIOCRSETADDRS: { 2156 struct pfioc_table *io = (struct pfioc_table *)addr; 2157 2158 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2159 
error = ENODEV; 2160 break; 2161 } 2162 PF_LOCK(); 2163 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2164 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2165 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2166 PFR_FLAG_USERIOCTL, 0); 2167 PF_UNLOCK(); 2168 break; 2169 } 2170 2171 case DIOCRGETADDRS: { 2172 struct pfioc_table *io = (struct pfioc_table *)addr; 2173 2174 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2175 error = ENODEV; 2176 break; 2177 } 2178 PF_LOCK(); 2179 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2180 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2181 PF_UNLOCK(); 2182 break; 2183 } 2184 2185 case DIOCRGETASTATS: { 2186 struct pfioc_table *io = (struct pfioc_table *)addr; 2187 2188 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2189 error = ENODEV; 2190 break; 2191 } 2192 PF_LOCK(); 2193 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2194 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2195 PF_UNLOCK(); 2196 break; 2197 } 2198 2199 case DIOCRCLRASTATS: { 2200 struct pfioc_table *io = (struct pfioc_table *)addr; 2201 2202 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2203 error = ENODEV; 2204 break; 2205 } 2206 PF_LOCK(); 2207 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2208 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2209 PFR_FLAG_USERIOCTL); 2210 PF_UNLOCK(); 2211 break; 2212 } 2213 2214 case DIOCRTSTADDRS: { 2215 struct pfioc_table *io = (struct pfioc_table *)addr; 2216 2217 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2218 error = ENODEV; 2219 break; 2220 } 2221 PF_LOCK(); 2222 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2223 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2224 PFR_FLAG_USERIOCTL); 2225 PF_UNLOCK(); 2226 break; 2227 } 2228 2229 case DIOCRINADEFINE: { 2230 struct pfioc_table *io = (struct pfioc_table *)addr; 2231 2232 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2233 error = ENODEV; 2234 break; 
2235 } 2236 PF_LOCK(); 2237 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2238 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2239 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2240 PF_UNLOCK(); 2241 break; 2242 } 2243 2244 case DIOCOSFPADD: { 2245 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2246 PF_LOCK(); 2247 error = pf_osfp_add(io); 2248 PF_UNLOCK(); 2249 break; 2250 } 2251 2252 case DIOCOSFPGET: { 2253 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2254 PF_LOCK(); 2255 error = pf_osfp_get(io); 2256 PF_UNLOCK(); 2257 break; 2258 } 2259 2260 case DIOCXBEGIN: { 2261 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2262 struct pfioc_trans_e *ioe; 2263 struct pfr_table *table; 2264 int i; 2265 2266 if (io->esize != sizeof(*ioe)) { 2267 error = ENODEV; 2268 goto fail; 2269 } 2270 PF_LOCK(); 2271 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2272 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2273 pf_default_rule_new = pf_default_rule; 2274 memset(&pf_trans_set, 0, sizeof(pf_trans_set)); 2275 for (i = 0; i < io->size; i++) { 2276 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2277 free(table, M_TEMP, sizeof(*table)); 2278 free(ioe, M_TEMP, sizeof(*ioe)); 2279 error = EFAULT; 2280 PF_UNLOCK(); 2281 goto fail; 2282 } 2283 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2284 sizeof(ioe->anchor)) { 2285 free(table, M_TEMP, sizeof(*table)); 2286 free(ioe, M_TEMP, sizeof(*ioe)); 2287 error = ENAMETOOLONG; 2288 PF_UNLOCK(); 2289 goto fail; 2290 } 2291 switch (ioe->type) { 2292 case PF_TRANS_TABLE: 2293 memset(table, 0, sizeof(*table)); 2294 strlcpy(table->pfrt_anchor, ioe->anchor, 2295 sizeof(table->pfrt_anchor)); 2296 if ((error = pfr_ina_begin(table, 2297 &ioe->ticket, NULL, 0))) { 2298 free(table, M_TEMP, sizeof(*table)); 2299 free(ioe, M_TEMP, sizeof(*ioe)); 2300 PF_UNLOCK(); 2301 goto fail; 2302 } 2303 break; 2304 case PF_TRANS_RULESET: 2305 if ((error = pf_begin_rules(&ioe->ticket, 2306 ioe->anchor))) { 2307 
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					PF_UNLOCK();
					goto fail;
				}
				break;
			default:
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EINVAL;
				PF_UNLOCK();
				goto fail;
			}
			/* report the ticket handed out for this element */
			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				PF_UNLOCK();
				goto fail;
			}
		}
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		PF_UNLOCK();
		break;
	}

	/*
	 * Abort an open transaction: undo the staged (inactive) table
	 * and ruleset changes named by each element of the ioctl array.
	 */
	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		PF_LOCK();
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				PF_UNLOCK();
				goto fail;
			}
			/* reject anchor names that are not NUL terminated */
			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
			    sizeof(ioe->anchor)) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = ENAMETOOLONG;
				PF_UNLOCK();
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				memset(table, 0, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					PF_UNLOCK();
					goto fail; /* really bad */
				}
				break;
			case PF_TRANS_RULESET:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->anchor))) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					PF_UNLOCK();
					goto fail; /* really bad */
				}
				break;
			default:
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EINVAL;
				PF_UNLOCK();
				goto fail; /* really bad */
			}
		}
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		PF_UNLOCK();
		break;
	}

	/*
	 * Commit an open transaction: validate every element's ticket
	 * first, then swap the staged tables/rulesets in under PF_LOCK
	 * so the whole batch becomes visible at once.
	 */
	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		PF_LOCK();
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				PF_UNLOCK();
				goto fail;
			}
			/* reject anchor names that are not NUL terminated */
			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
			    sizeof(ioe->anchor)) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = ENAMETOOLONG;
				PF_UNLOCK();
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				/* ticket must match the open table txn */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					error = EBUSY;
					PF_UNLOCK();
					goto fail;
				}
				break;
			case PF_TRANS_RULESET:
				/* ticket must match the inactive ruleset */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules.inactive.open ||
				    rs->rules.inactive.ticket !=
				    ioe->ticket) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					error = EBUSY;
					PF_UNLOCK();
					goto fail;
				}
				break;
			default:
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EINVAL;
				PF_UNLOCK();
				goto fail;
			}
		}

		/*
		 * Checked already in DIOCSETLIMIT, but check again as the
		 * situation might have changed.
		 */
		for (i = 0; i < PF_LIMIT_MAX; i++) {
			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
			    pf_pool_limits[i].limit_new) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EBUSY;
				PF_UNLOCK();
				goto fail;
			}
		}
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				PF_UNLOCK();
				goto fail;
			}
			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
			    sizeof(ioe->anchor)) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = ENAMETOOLONG;
				PF_UNLOCK();
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				memset(table, 0, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					PF_UNLOCK();
					goto fail; /* really bad */
				}
				break;
			case PF_TRANS_RULESET:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->anchor))) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					PF_UNLOCK();
					goto fail; /* really bad */
				}
				break;
			default:
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EINVAL;
				PF_UNLOCK();
				goto fail; /* really bad */
			}
		}
		/* apply the pool hard limits staged via DIOCSETLIMIT */
		for (i = 0; i < PF_LIMIT_MAX; i++) {
			if (pf_pool_limits[i].limit_new !=
			    pf_pool_limits[i].limit &&
			    pool_sethardlimit(pf_pool_limits[i].pp,
			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EBUSY;
				PF_UNLOCK();
				goto fail; /* really bad */
			}
			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
		}
		for (i = 0; i < PFTM_MAX; i++) {
			int old = pf_default_rule.timeout[i];

			pf_default_rule.timeout[i] =
			    pf_default_rule_new.timeout[i];
			/*
			 * NOTE(review): this compares the timeout VALUE to
			 * the index constant PFTM_INTERVAL.  If the intent is
			 * "kick the purge task when the purge interval got
			 * shorter", the condition would rather be
			 * "i == PFTM_INTERVAL" - confirm against upstream.
			 */
			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
			    pf_default_rule.timeout[i] < old)
				task_add(net_tq(0), &pf_purge_task);
		}
		pfi_xcommit();
		pf_trans_set_commit();
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		PF_UNLOCK();
		break;
	}

	/*
	 * Copy out the source-tracking nodes.  With psn_len == 0 only
	 * the required buffer size is reported back to userland.
	 */
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *p, *pstore;
		u_int32_t nr = 0;
		size_t space = psn->psn_len;

		PF_LOCK();
		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			PF_UNLOCK();
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int secs = time_uptime, diff;

			/* stop once the user buffer is full */
			if ((nr + 1) * sizeof(*p) > psn->psn_len)
				break;

			/* sanitize kernel pointers before copyout */
			memcpy(pstore, n, sizeof(*pstore));
			memset(&pstore->entry, 0, sizeof(pstore->entry));
			pstore->rule.ptr = NULL;
			pstore->kif = NULL;
			pstore->rule.nr = n->rule.ptr->nr;
			/* export age and remaining lifetime in seconds */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				free(pstore, M_TEMP, sizeof(*pstore));
				PF_UNLOCK();
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		free(pstore, M_TEMP, sizeof(*pstore));
		PF_UNLOCK();
		break;
	}

	/*
	 * Drop every source node: unlink all states from their nodes,
	 * mark every node expired and purge them right away.
	 */
	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		RB_FOREACH(state, pf_state_tree_id, &tree_id)
			pf_src_tree_remove_state(state);
		PF_STATE_EXIT_WRITE();
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			n->expire = 1;
		pf_purge_expired_src_nodes();
		PF_UNLOCK();
		break;
	}

	/*
	 * Expire the source nodes whose src/dst addresses match the
	 * (optionally negated) address/mask pairs supplied by userland.
	 */
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		u_int killed = 0;

		PF_LOCK();
		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (pf_match_addr(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    pf_match_addr(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					PF_ASSERT_LOCKED();
					PF_STATE_ENTER_WRITE();
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id)
						pf_state_rm_src_node(s, sn);
					PF_STATE_EXIT_WRITE();
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		psnk->psnk_killed = killed;
		PF_UNLOCK();
		break;
	}

	/*
	 * Stage a new hostid (0 requests a random one); it takes effect
	 * at the next pf_trans_set_commit().
	 */
	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_LOCK();
		if (*hostid == 0)
			pf_trans_set.hostid = arc4random();
		else
			pf_trans_set.hostid = *hostid;
		pf_trans_set.mask |= PF_TSET_HOSTID;
		PF_UNLOCK();
		break;
	}

	/* flush the passive OS fingerprint table */
	case DIOCOSFPFLUSH:
		PF_LOCK();
		pf_osfp_flush();
		PF_UNLOCK();
		break;

	/* copy out the known interfaces/groups and their counters */
	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		PF_LOCK();
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		PF_UNLOCK();
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_LOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_UNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_LOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_UNLOCK();
		break;
	}

	/* stage the reassembly setting for the next commit */
	case DIOCSETREASS: {
		u_int32_t *reass = (u_int32_t *)addr;

		PF_LOCK();
		pf_trans_set.reass = *reass;
		pf_trans_set.mask |= PF_TSET_REASS;
		PF_UNLOCK();
		break;
	}

	/* set the syn-flood high/low watermarks for syncookies */
	case DIOCSETSYNFLWATS: {
		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;

		PF_LOCK();
		error = pf_syncookies_setwats(io->hiwat, io->lowat);
		PF_UNLOCK();
		break;
	}

	case DIOCGETSYNFLWATS: {
		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;

		PF_LOCK();
		error = pf_syncookies_getwats(io);
		PF_UNLOCK();
		break;
	}

	case DIOCSETSYNCOOKIES: {
		u_int8_t *mode = (u_int8_t *)addr;

		PF_LOCK();
		error = pf_syncookies_setmode(*mode);
		PF_UNLOCK();
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	NET_UNLOCK();
	return (error);
}

/*
 * Apply the values staged in pf_trans_set to the live pf_status.
 * Only the fields whose bit is set in pf_trans_set.mask are copied;
 * called at transaction commit time (see DIOCXCOMMIT above).
 */
void
pf_trans_set_commit(void)
{
	if (pf_trans_set.mask & PF_TSET_STATUSIF)
		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
	if (pf_trans_set.mask & PF_TSET_DEBUG)
		pf_status.debug = pf_trans_set.debug;
	if (pf_trans_set.mask & PF_TSET_HOSTID)
		pf_status.hostid = pf_trans_set.hostid;
	if (pf_trans_set.mask & PF_TSET_REASS)
		pf_status.reass = pf_trans_set.reass;
}

/*
 * Copy a pool description coming from userland.  The kernel-only kif
 * pointer is cleared so a stale/forged kernel address cannot leak in;
 * it is resolved later via pf_kif_setup() in pf_rule_copyin().
 */
void
pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
{
	memmove(to, from, sizeof(*to));
	to->kif = NULL;
}

/*
 * Copy a rule handed in from userland into a kernel rule, field by
 * field, resolving names to kernel objects on the way: interfaces
 * (pf_kif_setup), the overload table (pfr_attach_table), queues
 * (pf_qname2qid) and tags (pf_tagname2tag).
 *
 * Returns 0 on success, EINVAL if an interface or the overload table
 * cannot be set up, EBUSY if a routing table/domain does not exist or
 * a queue/tag name cannot be resolved.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	to->src = from->src;
	to->dst = from->dst;

	/* XXX union skip[] */

	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	/* resolve interface names to kernel interface objects */
	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);
	if (to->overload_tblname[0]) {
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	/* the referenced routing table/domain must exist */
	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain))
		return (EBUSY);
	if (to->onrdomain >= 0)		/* make sure it is a real rdomain */
		to->onrdomain = rtable_l2(to->onrdomain);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
	pf_init_threshold(&to->pktrate, from->pktrate.limit,
	    from->pktrate.seconds);

	/* a parent queue is required for a child queue to be usable */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->delay = from->delay;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* without pflog there is no interface to log to */
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert.type = from->divert.type;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}

/*
 * sysctl handler: snapshot pf_status (with the per-interface counters
 * refreshed by pfi_update_status()) under the pf lock, then hand the
 * private copy to userland read-only via sysctl_rdstruct().
 */
int
pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	struct pf_status pfs;

	NET_RLOCK();
	PF_LOCK();
	memcpy(&pfs, &pf_status, sizeof(struct pf_status));
	pfi_update_status(pfs.ifname, &pfs);
	PF_UNLOCK();
	NET_RUNLOCK();

	return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
}