1 /* $OpenBSD: pf_ioctl.c,v 1.344 2019/05/09 14:59:30 claudio Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
35 * 36 */ 37 38 #include "pfsync.h" 39 #include "pflog.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/sysctl.h> 44 #include <sys/mbuf.h> 45 #include <sys/filio.h> 46 #include <sys/fcntl.h> 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/kernel.h> 50 #include <sys/time.h> 51 #include <sys/timeout.h> 52 #include <sys/pool.h> 53 #include <sys/malloc.h> 54 #include <sys/kthread.h> 55 #include <sys/rwlock.h> 56 #include <sys/syslog.h> 57 #include <uvm/uvm_extern.h> 58 59 #include <crypto/md5.h> 60 61 #include <net/if.h> 62 #include <net/if_var.h> 63 #include <net/route.h> 64 #include <net/hfsc.h> 65 #include <net/fq_codel.h> 66 67 #include <netinet/in.h> 68 #include <netinet/ip.h> 69 #include <netinet/in_pcb.h> 70 #include <netinet/ip_var.h> 71 #include <netinet/ip_icmp.h> 72 #include <netinet/tcp.h> 73 #include <netinet/udp.h> 74 75 #ifdef INET6 76 #include <netinet/ip6.h> 77 #include <netinet/icmp6.h> 78 #endif /* INET6 */ 79 80 #include <net/pfvar.h> 81 #include <net/pfvar_priv.h> 82 83 #if NPFSYNC > 0 84 #include <netinet/ip_ipsp.h> 85 #include <net/if_pfsync.h> 86 #endif /* NPFSYNC > 0 */ 87 88 struct pool pf_tag_pl; 89 90 void pfattach(int); 91 void pf_thread_create(void *); 92 int pfopen(dev_t, int, int, struct proc *); 93 int pfclose(dev_t, int, int, struct proc *); 94 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *); 95 int pf_begin_rules(u_int32_t *, const char *); 96 int pf_rollback_rules(u_int32_t, char *); 97 void pf_remove_queues(void); 98 int pf_commit_queues(void); 99 void pf_free_queues(struct pf_queuehead *); 100 int pf_setup_pfsync_matching(struct pf_ruleset *); 101 void pf_hash_rule(MD5_CTX *, struct pf_rule *); 102 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 103 int pf_commit_rules(u_int32_t, char *); 104 int pf_addr_setup(struct pf_ruleset *, 105 struct pf_addr_wrap *, sa_family_t); 106 int pf_kif_setup(char *, struct pfi_kif **); 107 void pf_addr_copyout(struct 
pf_addr_wrap *); 108 void pf_trans_set_commit(void); 109 void pf_pool_copyin(struct pf_pool *, struct pf_pool *); 110 int pf_rule_copyin(struct pf_rule *, struct pf_rule *, 111 struct pf_ruleset *); 112 u_int16_t pf_qname2qid(char *, int); 113 void pf_qid2qname(u_int16_t, char *); 114 void pf_qid_unref(u_int16_t); 115 116 struct pf_rule pf_default_rule, pf_default_rule_new; 117 118 struct { 119 char statusif[IFNAMSIZ]; 120 u_int32_t debug; 121 u_int32_t hostid; 122 u_int32_t reass; 123 u_int32_t mask; 124 } pf_trans_set; 125 126 #define PF_TSET_STATUSIF 0x01 127 #define PF_TSET_DEBUG 0x02 128 #define PF_TSET_HOSTID 0x04 129 #define PF_TSET_REASS 0x08 130 131 #define TAGID_MAX 50000 132 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 133 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 134 135 #ifdef WITH_PF_LOCK 136 /* 137 * pf_lock protects consistency of PF data structures, which don't have 138 * their dedicated lock yet. The pf_lock currently protects: 139 * - rules, 140 * - radix tables, 141 * - source nodes 142 * All callers must grab pf_lock exclusively. 143 * 144 * pf_state_lock protects consistency of state table. Packets, which do state 145 * look up grab the lock as readers. If packet must create state, then it must 146 * grab the lock as writer. Whenever packet creates state it grabs pf_lock 147 * first then it locks pf_state_lock as the writer. 
 */
struct rwlock		 pf_lock = RWLOCK_INITIALIZER("pf_lock");
struct rwlock		 pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
#endif /* WITH_PF_LOCK */

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);


/*
 * One-time pf initialization, run at pseudo-device attach time.
 * Creates all pf memory pools, initializes the sub-systems pf depends
 * on (queueing, tables, interface abstraction, OS fingerprinting,
 * syncookies), and fills in the implicit default pass rule together
 * with the default state timeouts.  `num' (number of devices) is
 * unused here.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
	    IPL_SOFTNET, 0, "pfrule", NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
	    IPL_SOFTNET, 0, "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
	    IPL_SOFTNET, 0, "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
	    IPL_SOFTNET, 0, "pfstate", NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
	    IPL_SOFTNET, 0, "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
	    IPL_SOFTNET, 0, "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
	    IPL_SOFTNET, 0, "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
	    IPL_SOFTNET, 0, "pfqueue", NULL);
	pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
	    IPL_SOFTNET, 0, "pftag", NULL);
	pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
	    IPL_SOFTNET, 0, "pfpktdelay", NULL);

	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();
	pf_syncookies_init();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* on machines with <= 100MB of RAM, use the smaller table limit */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* double-buffered queue definitions: one active, one being loaded */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
	pf_default_rule.nat.addr.type = PF_ADDR_NONE;
	pf_default_rule.route.addr.type = PF_ADDR_NONE;

	pf_normalize_init();
	memset(&pf_status, 0, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}

/*
 * Open /dev/pf; only minor device 0 exists.
 */
int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

/*
 * Close /dev/pf; mirrors the minor-device check in pfopen().
 */
int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

/*
 * Remove `rule' from `rulequeue' (when non-NULL) and drop all the
 * references the rule holds (tags, route labels, dynamic addresses,
 * tables, interfaces, anchor), finally returning it to the rule pool.
 * A rule still referenced by states or source nodes is only unlinked
 * here; the final teardown happens later once those references drain
 * (the tqe_prev == NULL marker distinguishes an unlinked rule).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* mark as unlinked so a later call can finish the teardown */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* defer destruction while states/src nodes reference the rule */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	if (rulequeue == NULL) {
		/* not done above when the rule was unlinked from a queue */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_remove_anchor(rule);
	pool_put(&pf_rule_pl, rule);
}

/*
 * Remove a single rule from its active ruleset, renumber the remaining
 * rules, bump the ruleset ticket and recompute skip steps.  The ruleset
 * itself is freed if the removal left it empty.
 */
void
pf_purge_rule(struct pf_rule *rule)
{
	u_int32_t		 nr = 0;
	struct pf_ruleset	*ruleset;

	KASSERT((rule != NULL) && (rule->ruleset != NULL));
	ruleset = rule->ruleset;

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	/* renumber the surviving rules to keep nr dense */
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);
	pf_remove_if_empty_ruleset(ruleset);
}

/*
 * Look up `tagname' in `head' and return its numeric tag, taking a
 * reference.  If it does not exist and `create' is set, allocate the
 * lowest free tag id (<= TAGID_MAX) and insert the entry so the list
 * stays sorted by tag.  Returns 0 on lookup failure, exhaustion of
 * tag ids, or pool allocation failure.
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	TAILQ_FOREACH(p, head, entries) {
		if (p->tag != new_tagid)
			break;
		new_tagid = p->tag + 1;
	}

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/*
 * Reverse lookup: copy the name for `tagid' into `p'.  `p' must have
 * room for PF_TAG_NAME_SIZE bytes; it is left untouched when the tag
 * is unknown.
 */
void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/*
 * Drop one reference on `tag'; the entry is freed when the refcount
 * reaches zero.  Tag 0 means "no tag" and is ignored.
 */
void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	TAILQ_FOREACH_SAFE(p, head, entries, next) {
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				pool_put(&pf_tag_pl, p);
			}
			break;
		}
	}
}

/* wrapper: packet-tag namespace (pf_tags list) */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}

/* wrapper: reverse lookup in the packet-tag namespace */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

/*
 * Take an additional reference on an already-existing packet tag.
 * NOTE(review): relies on TAILQ_FOREACH leaving t == NULL when the
 * tag is not found, so an unknown tag is silently ignored.
 */
void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

/* wrapper: release a packet-tag reference */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

/*
 * Resolve the route-label name in `a' to its numeric id (taking a
 * reference).  Returns -1 when the label cannot be resolved.
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
	/* drop the route-label reference taken by pf_rtlabel_add() */
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
}

/*
 * Fill in the route-label name for userland consumption; "?" is used
 * when the id can no longer be resolved.
 */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
}

/* wrapper: queue-id namespace (pf_qids list, same machinery as tags) */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}

/* wrapper: reverse lookup in the queue-id namespace */
void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}

/* wrapper: release a queue-id reference */
void
pf_qid_unref(u_int16_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

/*
 * Start a ruleset transaction: empty the inactive rule list of
 * `anchor' (creating the ruleset if needed), hand out a fresh ticket
 * and mark the inactive side open for loading.
 */
int
pf_begin_rules(u_int32_t *ticket, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	*ticket = ++rs->rules.inactive.ticket;
	rs->rules.inactive.open = 1;
	return (0);
}

/*
 * Abort a ruleset transaction: throw away everything loaded into the
 * inactive side of `anchor' under `ticket'.  A stale ticket or closed
 * transaction is not an error.  For the main ruleset the inactive
 * queue definitions are discarded as well.
 */
int
pf_rollback_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    rs->rules.inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	rs->rules.inactive.open = 0;

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);

	pf_free_queues(pf_queues_inactive);

	return (0);
}

/*
 * Release every queue definition on `where', dropping the interface
 * reference each one holds.
 */
void
pf_free_queues(struct pf_queuehead *where)
{
	struct pf_queuespec	*q, *qtmp;

	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
		TAILQ_REMOVE(where, q, entries);
		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
		pool_put(&pf_queue_pl, q);
	}
}

/*
 * Detach pf queueing from all interfaces that have a root queue,
 * restoring the default priq discipline on their send queues.
 */
void
pf_remove_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;

	/* put back interfaces in normal queueing mode */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}
}

/*
 * Per-interface bookkeeping used while building the new queue
 * configuration in pf_create_queues(); a singly-linked list keyed
 * by ifp.
 */
struct pf_queue_if {
	struct ifnet		*ifp;
	const struct ifq_ops	*ifqops;
	const struct pfq_ops	*pfqops;
	void			*disc;
	struct pf_queue_if	*next;
};

/* find the pf_queue_if entry for `ifp' in `list', or NULL */
static inline struct pf_queue_if *
pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
{
	struct pf_queue_if *qif = list;

	while (qif != NULL) {
		if (qif->ifp == ifp)
			return (qif);

		qif = qif->next;
	}

	return (qif);
}

/*
 * Instantiate the active queue definitions: allocate a traffic
 * conditioner per root-queue interface (HFSC for root classes,
 * FQ-CoDel otherwise), add every queue to its conditioner, reset
 * interfaces that lost their root queue, and finally attach the
 * new disciplines.  On error all allocated conditioners are freed.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_queue_if	*list = NULL, *qif;
	int			 error;

	/*
	 * Find root queues and allocate traffic conditioner
	 * private data for these interfaces
	 */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
		qif->ifp = ifp;

		if (q->flags & PFQS_ROOTCLASS) {
			qif->ifqops = ifq_hfsc_ops;
			qif->pfqops = pfq_hfsc_ops;
		} else {
			qif->ifqops = ifq_fqcodel_ops;
			qif->pfqops = pfq_fqcodel_ops;
		}

		qif->disc = qif->pfqops->pfq_alloc(ifp);

		qif->next = list;
		list = qif;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = pf_ifp2q(list, ifp);
		KASSERT(qif != NULL);

		error = qif->pfqops->pfq_addqueue(qif->disc, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = pf_ifp2q(list, ifp);
		if (qif != NULL)
			continue;

		/* interface no longer has a root queue: back to priq */
		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		ifp = qif->ifp;

		ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (0);

error:
	/* unwind: free every conditioner allocated above */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		qif->pfqops->pfq_free(qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (error);
}

/*
 * Swap the inactive queue definitions in and instantiate them; on
 * failure the swap is undone so the old configuration stays active.
 * On success the previous definitions are freed.
 */
int
pf_commit_queues(void)
{
	struct pf_queuehead	*qswap;
	int error;

	/* swap */
	qswap = pf_queues_active;
	pf_queues_active = pf_queues_inactive;
	pf_queues_inactive = qswap;

	error = pf_create_queues();
	if (error != 0) {
		pf_queues_inactive = pf_queues_active;
		pf_queues_active = qswap;
		return (error);
	}

	pf_free_queues(pf_queues_inactive);

	return (0);
}

/*
 * Select the queue-manager ops for `q'.  Only FQ-CoDel flow queues
 * have one here; everything else returns NULL (no default manager).
 */
const struct pfq_ops *
pf_queue_manager(struct pf_queuespec *q)
{
	if (q->flags & PFQS_FLOWQUEUE)
		return pfq_fqcodel_ops;
	return (/* pfq_default_ops */ NULL);
}

/* helpers feeding rule fields into the MD5 context `ctx' in scope */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

/* hash multi-byte integers in network byte order via temporary `stor' */
#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

/*
 * Mix the identifying fields of a rule address (type-specific value,
 * ports, negation, port operator) into the ruleset checksum.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

/*
 * Mix the match-relevant fields of `rule' into the ruleset checksum
 * used by pfsync to verify both peers run equivalent rulesets.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

/*
 * Commit a ruleset transaction: validate the ticket, compute the
 * pfsync checksum for the main ruleset, swap the inactive rules in
 * and destroy the previous active rules.  For the main ruleset the
 * queue definitions are committed as well.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	/* Make sure any expired rules get removed from active rules first. */
	pf_purge_expired_rules();

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;
	old_array = rs->rules.active.ptr_array;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.ptr_array = old_array;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}

/*
 * Compute the MD5 checksum over the inactive ruleset (stored into
 * pf_status.pf_chksum) and build the nr-indexed rule pointer array
 * used for pfsync matching.  Returns ENOMEM if the array cannot be
 * allocated.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;

	if (rs->rules.inactive.rcount) {
		rs->rules.inactive.ptr_array =
		    mallocarray(rs->rules.inactive.rcount, sizeof(caddr_t),
		    M_TEMP, M_NOWAIT);

		if (!rs->rules.inactive.ptr_array)
			return (ENOMEM);

		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules.inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}

/*
 * Resolve all the dynamic parts of a rule address (dynamic interface
 * address, table attachment, route label); EINVAL if any fails.
 */
int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	if (pfi_dynaddr_setup(addr, af) ||
	    pf_tbladdr_setup(ruleset, addr) ||
	    pf_rtlabel_add(addr))
		return (EINVAL);

	return (0);
}

/*
 * Resolve an interface name to a referenced pfi_kif.  An empty name
 * yields *kif == NULL (no interface restriction); an unknown name is
 * EINVAL.
 */
int
pf_kif_setup(char *ifname, struct pfi_kif **kif)
{
	if (ifname[0]) {
		*kif = pfi_kif_get(ifname);
		if (*kif == NULL)
			return (EINVAL);

		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
	} else
		*kif = NULL;

	return (0);
}

/*
 * Fill in the userland-visible representation of a rule address
 * (dynamic address, table name, route label) before copyout.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}

/*
 * The /dev/pf ioctl entry point.  The two leading switch statements
 * gate access: at securelevel > 1 only read-style operations (and
 * dummy table operations) are permitted, and without FWRITE only
 * read-only operations are allowed.  The main command switch below
 * runs under the net lock, taking the pf lock per command.
 */
int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel > 1)
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCGETQUEUES:
		case DIOCGETQUEUE:
		case DIOCGETQSTATS:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
		case DIOCGETSYNFLWATS:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case
DIOCGETTIMEOUT: 986 case DIOCGETLIMIT: 987 case DIOCGETRULESETS: 988 case DIOCGETRULESET: 989 case DIOCGETQUEUES: 990 case DIOCGETQUEUE: 991 case DIOCGETQSTATS: 992 case DIOCNATLOOK: 993 case DIOCRGETTABLES: 994 case DIOCRGETTSTATS: 995 case DIOCRGETADDRS: 996 case DIOCRGETASTATS: 997 case DIOCRTSTADDRS: 998 case DIOCOSFPGET: 999 case DIOCGETSRCNODES: 1000 case DIOCIGETIFACES: 1001 case DIOCGETSYNFLWATS: 1002 break; 1003 case DIOCRCLRTABLES: 1004 case DIOCRADDTABLES: 1005 case DIOCRDELTABLES: 1006 case DIOCRCLRTSTATS: 1007 case DIOCRCLRADDRS: 1008 case DIOCRADDADDRS: 1009 case DIOCRDELADDRS: 1010 case DIOCRSETADDRS: 1011 case DIOCRSETTFLAGS: 1012 if (((struct pfioc_table *)addr)->pfrio_flags & 1013 PFR_FLAG_DUMMY) { 1014 flags |= FWRITE; /* need write lock for dummy */ 1015 break; /* dummy operation ok */ 1016 } 1017 return (EACCES); 1018 case DIOCGETRULE: 1019 if (((struct pfioc_rule *)addr)->action == 1020 PF_GET_CLR_CNTR) 1021 return (EACCES); 1022 break; 1023 default: 1024 return (EACCES); 1025 } 1026 1027 NET_LOCK(); 1028 switch (cmd) { 1029 1030 case DIOCSTART: 1031 PF_LOCK(); 1032 if (pf_status.running) 1033 error = EEXIST; 1034 else { 1035 pf_status.running = 1; 1036 pf_status.since = time_uptime; 1037 if (pf_status.stateid == 0) { 1038 pf_status.stateid = time_second; 1039 pf_status.stateid = pf_status.stateid << 32; 1040 } 1041 timeout_add_sec(&pf_purge_to, 1); 1042 pf_create_queues(); 1043 DPFPRINTF(LOG_NOTICE, "pf: started"); 1044 } 1045 PF_UNLOCK(); 1046 break; 1047 1048 case DIOCSTOP: 1049 PF_LOCK(); 1050 if (!pf_status.running) 1051 error = ENOENT; 1052 else { 1053 pf_status.running = 0; 1054 pf_status.since = time_uptime; 1055 pf_remove_queues(); 1056 DPFPRINTF(LOG_NOTICE, "pf: stopped"); 1057 } 1058 PF_UNLOCK(); 1059 break; 1060 1061 case DIOCGETQUEUES: { 1062 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1063 struct pf_queuespec *qs; 1064 u_int32_t nr = 0; 1065 1066 PF_LOCK(); 1067 pq->ticket = pf_main_ruleset.rules.active.ticket; 1068 1069 
/* save state to not run over them all each time? */ 1070 qs = TAILQ_FIRST(pf_queues_active); 1071 while (qs != NULL) { 1072 qs = TAILQ_NEXT(qs, entries); 1073 nr++; 1074 } 1075 pq->nr = nr; 1076 PF_UNLOCK(); 1077 break; 1078 } 1079 1080 case DIOCGETQUEUE: { 1081 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1082 struct pf_queuespec *qs; 1083 u_int32_t nr = 0; 1084 1085 PF_LOCK(); 1086 if (pq->ticket != pf_main_ruleset.rules.active.ticket) { 1087 error = EBUSY; 1088 PF_UNLOCK(); 1089 break; 1090 } 1091 1092 /* save state to not run over them all each time? */ 1093 qs = TAILQ_FIRST(pf_queues_active); 1094 while ((qs != NULL) && (nr++ < pq->nr)) 1095 qs = TAILQ_NEXT(qs, entries); 1096 if (qs == NULL) { 1097 error = EBUSY; 1098 PF_UNLOCK(); 1099 break; 1100 } 1101 memcpy(&pq->queue, qs, sizeof(pq->queue)); 1102 PF_UNLOCK(); 1103 break; 1104 } 1105 1106 case DIOCGETQSTATS: { 1107 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 1108 struct pf_queuespec *qs; 1109 u_int32_t nr; 1110 int nbytes; 1111 1112 PF_LOCK(); 1113 if (pq->ticket != pf_main_ruleset.rules.active.ticket) { 1114 error = EBUSY; 1115 PF_UNLOCK(); 1116 break; 1117 } 1118 nbytes = pq->nbytes; 1119 nr = 0; 1120 1121 /* save state to not run over them all each time? 
*/ 1122 qs = TAILQ_FIRST(pf_queues_active); 1123 while ((qs != NULL) && (nr++ < pq->nr)) 1124 qs = TAILQ_NEXT(qs, entries); 1125 if (qs == NULL) { 1126 error = EBUSY; 1127 PF_UNLOCK(); 1128 break; 1129 } 1130 memcpy(&pq->queue, qs, sizeof(pq->queue)); 1131 /* It's a root flow queue but is not an HFSC root class */ 1132 if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 && 1133 !(qs->flags & PFQS_ROOTCLASS)) 1134 error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf, 1135 &nbytes); 1136 else 1137 error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf, 1138 &nbytes); 1139 if (error == 0) 1140 pq->nbytes = nbytes; 1141 PF_UNLOCK(); 1142 break; 1143 } 1144 1145 case DIOCADDQUEUE: { 1146 struct pfioc_queue *q = (struct pfioc_queue *)addr; 1147 struct pf_queuespec *qs; 1148 1149 PF_LOCK(); 1150 if (q->ticket != pf_main_ruleset.rules.inactive.ticket) { 1151 error = EBUSY; 1152 PF_UNLOCK(); 1153 break; 1154 } 1155 qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1156 if (qs == NULL) { 1157 error = ENOMEM; 1158 PF_UNLOCK(); 1159 break; 1160 } 1161 memcpy(qs, &q->queue, sizeof(*qs)); 1162 qs->qid = pf_qname2qid(qs->qname, 1); 1163 if (qs->qid == 0) { 1164 pool_put(&pf_queue_pl, qs); 1165 error = EBUSY; 1166 PF_UNLOCK(); 1167 break; 1168 } 1169 if (qs->parent[0] && (qs->parent_qid = 1170 pf_qname2qid(qs->parent, 0)) == 0) { 1171 pool_put(&pf_queue_pl, qs); 1172 error = ESRCH; 1173 PF_UNLOCK(); 1174 break; 1175 } 1176 qs->kif = pfi_kif_get(qs->ifname); 1177 if (qs->kif == NULL) { 1178 pool_put(&pf_queue_pl, qs); 1179 error = ESRCH; 1180 PF_UNLOCK(); 1181 break; 1182 } 1183 /* XXX resolve bw percentage specs */ 1184 pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE); 1185 1186 TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries); 1187 PF_UNLOCK(); 1188 1189 break; 1190 } 1191 1192 case DIOCADDRULE: { 1193 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1194 struct pf_ruleset *ruleset; 1195 struct pf_rule *rule, *tail; 1196 1197 PF_LOCK(); 1198 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1199 
ruleset = pf_find_ruleset(pr->anchor); 1200 if (ruleset == NULL) { 1201 error = EINVAL; 1202 PF_UNLOCK(); 1203 break; 1204 } 1205 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1206 error = EINVAL; 1207 PF_UNLOCK(); 1208 break; 1209 } 1210 if (pr->ticket != ruleset->rules.inactive.ticket) { 1211 error = EBUSY; 1212 PF_UNLOCK(); 1213 break; 1214 } 1215 rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1216 if (rule == NULL) { 1217 error = ENOMEM; 1218 PF_UNLOCK(); 1219 break; 1220 } 1221 if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) { 1222 pf_rm_rule(NULL, rule); 1223 rule = NULL; 1224 PF_UNLOCK(); 1225 break; 1226 } 1227 rule->cuid = p->p_ucred->cr_ruid; 1228 rule->cpid = p->p_p->ps_pid; 1229 1230 switch (rule->af) { 1231 case 0: 1232 break; 1233 case AF_INET: 1234 break; 1235 #ifdef INET6 1236 case AF_INET6: 1237 break; 1238 #endif /* INET6 */ 1239 default: 1240 pf_rm_rule(NULL, rule); 1241 rule = NULL; 1242 error = EAFNOSUPPORT; 1243 PF_UNLOCK(); 1244 goto fail; 1245 } 1246 tail = TAILQ_LAST(ruleset->rules.inactive.ptr, 1247 pf_rulequeue); 1248 if (tail) 1249 rule->nr = tail->nr + 1; 1250 else 1251 rule->nr = 0; 1252 1253 if (rule->src.addr.type == PF_ADDR_NONE || 1254 rule->dst.addr.type == PF_ADDR_NONE) 1255 error = EINVAL; 1256 1257 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 1258 error = EINVAL; 1259 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 1260 error = EINVAL; 1261 if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af)) 1262 error = EINVAL; 1263 if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af)) 1264 error = EINVAL; 1265 if (pf_addr_setup(ruleset, &rule->route.addr, rule->af)) 1266 error = EINVAL; 1267 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1268 error = EINVAL; 1269 if (rule->rt && !rule->direction) 1270 error = EINVAL; 1271 if (rule->scrub_flags & PFSTATE_SETPRIO && 1272 (rule->set_prio[0] > IFQ_MAXPRIO || 1273 rule->set_prio[1] > IFQ_MAXPRIO)) 1274 error = EINVAL; 1275 1276 if (error) { 1277 
pf_rm_rule(NULL, rule); 1278 PF_UNLOCK(); 1279 break; 1280 } 1281 TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr, 1282 rule, entries); 1283 rule->ruleset = ruleset; 1284 ruleset->rules.inactive.rcount++; 1285 PF_UNLOCK(); 1286 break; 1287 } 1288 1289 case DIOCGETRULES: { 1290 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1291 struct pf_ruleset *ruleset; 1292 struct pf_rule *tail; 1293 1294 PF_LOCK(); 1295 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1296 ruleset = pf_find_ruleset(pr->anchor); 1297 if (ruleset == NULL) { 1298 error = EINVAL; 1299 PF_UNLOCK(); 1300 break; 1301 } 1302 tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue); 1303 if (tail) 1304 pr->nr = tail->nr + 1; 1305 else 1306 pr->nr = 0; 1307 pr->ticket = ruleset->rules.active.ticket; 1308 PF_UNLOCK(); 1309 break; 1310 } 1311 1312 case DIOCGETRULE: { 1313 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1314 struct pf_ruleset *ruleset; 1315 struct pf_rule *rule; 1316 int i; 1317 1318 PF_LOCK(); 1319 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1320 ruleset = pf_find_ruleset(pr->anchor); 1321 if (ruleset == NULL) { 1322 error = EINVAL; 1323 PF_UNLOCK(); 1324 break; 1325 } 1326 if (pr->ticket != ruleset->rules.active.ticket) { 1327 error = EBUSY; 1328 PF_UNLOCK(); 1329 break; 1330 } 1331 rule = TAILQ_FIRST(ruleset->rules.active.ptr); 1332 while ((rule != NULL) && (rule->nr != pr->nr)) 1333 rule = TAILQ_NEXT(rule, entries); 1334 if (rule == NULL) { 1335 error = EBUSY; 1336 PF_UNLOCK(); 1337 break; 1338 } 1339 memcpy(&pr->rule, rule, sizeof(struct pf_rule)); 1340 memset(&pr->rule.entries, 0, sizeof(pr->rule.entries)); 1341 pr->rule.kif = NULL; 1342 pr->rule.nat.kif = NULL; 1343 pr->rule.rdr.kif = NULL; 1344 pr->rule.route.kif = NULL; 1345 pr->rule.rcv_kif = NULL; 1346 pr->rule.anchor = NULL; 1347 pr->rule.overload_tbl = NULL; 1348 pr->rule.pktrate.limit /= PF_THRESHOLD_MULT; 1349 memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle)); 1350 pr->rule.ruleset = NULL; 1351 if (pf_anchor_copyout(ruleset, rule, 
pr)) { 1352 error = EBUSY; 1353 PF_UNLOCK(); 1354 break; 1355 } 1356 pf_addr_copyout(&pr->rule.src.addr); 1357 pf_addr_copyout(&pr->rule.dst.addr); 1358 pf_addr_copyout(&pr->rule.rdr.addr); 1359 pf_addr_copyout(&pr->rule.nat.addr); 1360 pf_addr_copyout(&pr->rule.route.addr); 1361 for (i = 0; i < PF_SKIP_COUNT; ++i) 1362 if (rule->skip[i].ptr == NULL) 1363 pr->rule.skip[i].nr = (u_int32_t)-1; 1364 else 1365 pr->rule.skip[i].nr = 1366 rule->skip[i].ptr->nr; 1367 1368 if (pr->action == PF_GET_CLR_CNTR) { 1369 rule->evaluations = 0; 1370 rule->packets[0] = rule->packets[1] = 0; 1371 rule->bytes[0] = rule->bytes[1] = 0; 1372 rule->states_tot = 0; 1373 } 1374 PF_UNLOCK(); 1375 break; 1376 } 1377 1378 case DIOCCHANGERULE: { 1379 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1380 struct pf_ruleset *ruleset; 1381 struct pf_rule *oldrule = NULL, *newrule = NULL; 1382 u_int32_t nr = 0; 1383 1384 if (pcr->action < PF_CHANGE_ADD_HEAD || 1385 pcr->action > PF_CHANGE_GET_TICKET) { 1386 error = EINVAL; 1387 break; 1388 } 1389 PF_LOCK(); 1390 ruleset = pf_find_ruleset(pcr->anchor); 1391 if (ruleset == NULL) { 1392 error = EINVAL; 1393 PF_UNLOCK(); 1394 break; 1395 } 1396 1397 if (pcr->action == PF_CHANGE_GET_TICKET) { 1398 pcr->ticket = ++ruleset->rules.active.ticket; 1399 PF_UNLOCK(); 1400 break; 1401 } else { 1402 if (pcr->ticket != 1403 ruleset->rules.active.ticket) { 1404 error = EINVAL; 1405 PF_UNLOCK(); 1406 break; 1407 } 1408 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1409 error = EINVAL; 1410 PF_UNLOCK(); 1411 break; 1412 } 1413 } 1414 1415 if (pcr->action != PF_CHANGE_REMOVE) { 1416 newrule = pool_get(&pf_rule_pl, 1417 PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1418 if (newrule == NULL) { 1419 error = ENOMEM; 1420 PF_UNLOCK(); 1421 break; 1422 } 1423 pf_rule_copyin(&pcr->rule, newrule, ruleset); 1424 newrule->cuid = p->p_ucred->cr_ruid; 1425 newrule->cpid = p->p_p->ps_pid; 1426 1427 switch (newrule->af) { 1428 case 0: 1429 break; 1430 case AF_INET: 1431 break; 1432 
#ifdef INET6 1433 case AF_INET6: 1434 break; 1435 #endif /* INET6 */ 1436 default: 1437 pf_rm_rule(NULL, newrule); 1438 error = EAFNOSUPPORT; 1439 PF_UNLOCK(); 1440 goto fail; 1441 } 1442 1443 if (newrule->rt && !newrule->direction) 1444 error = EINVAL; 1445 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 1446 error = EINVAL; 1447 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 1448 error = EINVAL; 1449 if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af)) 1450 error = EINVAL; 1451 if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af)) 1452 error = EINVAL; 1453 if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af)) 1454 error = EINVAL; 1455 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1456 error = EINVAL; 1457 1458 if (error) { 1459 pf_rm_rule(NULL, newrule); 1460 PF_UNLOCK(); 1461 break; 1462 } 1463 } 1464 1465 if (pcr->action == PF_CHANGE_ADD_HEAD) 1466 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1467 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1468 oldrule = TAILQ_LAST(ruleset->rules.active.ptr, 1469 pf_rulequeue); 1470 else { 1471 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1472 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1473 oldrule = TAILQ_NEXT(oldrule, entries); 1474 if (oldrule == NULL) { 1475 if (newrule != NULL) 1476 pf_rm_rule(NULL, newrule); 1477 error = EINVAL; 1478 PF_UNLOCK(); 1479 break; 1480 } 1481 } 1482 1483 if (pcr->action == PF_CHANGE_REMOVE) { 1484 pf_rm_rule(ruleset->rules.active.ptr, oldrule); 1485 ruleset->rules.active.rcount--; 1486 } else { 1487 if (oldrule == NULL) 1488 TAILQ_INSERT_TAIL( 1489 ruleset->rules.active.ptr, 1490 newrule, entries); 1491 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1492 pcr->action == PF_CHANGE_ADD_BEFORE) 1493 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1494 else 1495 TAILQ_INSERT_AFTER( 1496 ruleset->rules.active.ptr, 1497 oldrule, newrule, entries); 1498 ruleset->rules.active.rcount++; 1499 } 1500 1501 nr = 0; 1502 
TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries) 1503 oldrule->nr = nr++; 1504 1505 ruleset->rules.active.ticket++; 1506 1507 pf_calc_skip_steps(ruleset->rules.active.ptr); 1508 pf_remove_if_empty_ruleset(ruleset); 1509 1510 PF_UNLOCK(); 1511 break; 1512 } 1513 1514 case DIOCCLRSTATES: { 1515 struct pf_state *s, *nexts; 1516 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1517 u_int killed = 0; 1518 1519 PF_LOCK(); 1520 PF_STATE_ENTER_WRITE(); 1521 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { 1522 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1523 1524 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1525 s->kif->pfik_name)) { 1526 #if NPFSYNC > 0 1527 /* don't send out individual delete messages */ 1528 SET(s->state_flags, PFSTATE_NOSYNC); 1529 #endif /* NPFSYNC > 0 */ 1530 pf_remove_state(s); 1531 killed++; 1532 } 1533 } 1534 PF_STATE_EXIT_WRITE(); 1535 psk->psk_killed = killed; 1536 #if NPFSYNC > 0 1537 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1538 #endif /* NPFSYNC > 0 */ 1539 PF_UNLOCK(); 1540 break; 1541 } 1542 1543 case DIOCKILLSTATES: { 1544 struct pf_state *s, *nexts; 1545 struct pf_state_item *si, *sit; 1546 struct pf_state_key *sk, key; 1547 struct pf_addr *srcaddr, *dstaddr; 1548 u_int16_t srcport, dstport; 1549 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1550 u_int i, killed = 0; 1551 const int dirs[] = { PF_IN, PF_OUT }; 1552 int sidx, didx; 1553 1554 if (psk->psk_pfcmp.id) { 1555 if (psk->psk_pfcmp.creatorid == 0) 1556 psk->psk_pfcmp.creatorid = pf_status.hostid; 1557 PF_LOCK(); 1558 PF_STATE_ENTER_WRITE(); 1559 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) { 1560 pf_remove_state(s); 1561 psk->psk_killed = 1; 1562 } 1563 PF_STATE_EXIT_WRITE(); 1564 PF_UNLOCK(); 1565 break; 1566 } 1567 1568 if (psk->psk_af && psk->psk_proto && 1569 psk->psk_src.port_op == PF_OP_EQ && 1570 psk->psk_dst.port_op == PF_OP_EQ) { 1571 1572 key.af = psk->psk_af; 1573 key.proto = 
psk->psk_proto; 1574 key.rdomain = psk->psk_rdomain; 1575 1576 PF_LOCK(); 1577 PF_STATE_ENTER_WRITE(); 1578 for (i = 0; i < nitems(dirs); i++) { 1579 if (dirs[i] == PF_IN) { 1580 sidx = 0; 1581 didx = 1; 1582 } else { 1583 sidx = 1; 1584 didx = 0; 1585 } 1586 pf_addrcpy(&key.addr[sidx], 1587 &psk->psk_src.addr.v.a.addr, key.af); 1588 pf_addrcpy(&key.addr[didx], 1589 &psk->psk_dst.addr.v.a.addr, key.af); 1590 key.port[sidx] = psk->psk_src.port[0]; 1591 key.port[didx] = psk->psk_dst.port[0]; 1592 1593 sk = RB_FIND(pf_state_tree, &pf_statetbl, &key); 1594 if (sk == NULL) 1595 continue; 1596 1597 TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit) 1598 if (((si->s->key[PF_SK_WIRE]->af == 1599 si->s->key[PF_SK_STACK]->af && 1600 sk == (dirs[i] == PF_IN ? 1601 si->s->key[PF_SK_WIRE] : 1602 si->s->key[PF_SK_STACK])) || 1603 (si->s->key[PF_SK_WIRE]->af != 1604 si->s->key[PF_SK_STACK]->af && 1605 dirs[i] == PF_IN && 1606 (sk == si->s->key[PF_SK_STACK] || 1607 sk == si->s->key[PF_SK_WIRE]))) && 1608 (!psk->psk_ifname[0] || 1609 (si->s->kif != pfi_all && 1610 !strcmp(psk->psk_ifname, 1611 si->s->kif->pfik_name)))) { 1612 pf_remove_state(si->s); 1613 killed++; 1614 } 1615 } 1616 if (killed) 1617 psk->psk_killed = killed; 1618 PF_STATE_EXIT_WRITE(); 1619 PF_UNLOCK(); 1620 break; 1621 } 1622 1623 PF_LOCK(); 1624 PF_STATE_ENTER_WRITE(); 1625 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; 1626 s = nexts) { 1627 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1628 1629 if (s->direction == PF_OUT) { 1630 sk = s->key[PF_SK_STACK]; 1631 srcaddr = &sk->addr[1]; 1632 dstaddr = &sk->addr[0]; 1633 srcport = sk->port[1]; 1634 dstport = sk->port[0]; 1635 } else { 1636 sk = s->key[PF_SK_WIRE]; 1637 srcaddr = &sk->addr[0]; 1638 dstaddr = &sk->addr[1]; 1639 srcport = sk->port[0]; 1640 dstport = sk->port[1]; 1641 } 1642 if ((!psk->psk_af || sk->af == psk->psk_af) 1643 && (!psk->psk_proto || psk->psk_proto == 1644 sk->proto) && psk->psk_rdomain == sk->rdomain && 1645 pf_match_addr(psk->psk_src.neg, 
1646 &psk->psk_src.addr.v.a.addr, 1647 &psk->psk_src.addr.v.a.mask, 1648 srcaddr, sk->af) && 1649 pf_match_addr(psk->psk_dst.neg, 1650 &psk->psk_dst.addr.v.a.addr, 1651 &psk->psk_dst.addr.v.a.mask, 1652 dstaddr, sk->af) && 1653 (psk->psk_src.port_op == 0 || 1654 pf_match_port(psk->psk_src.port_op, 1655 psk->psk_src.port[0], psk->psk_src.port[1], 1656 srcport)) && 1657 (psk->psk_dst.port_op == 0 || 1658 pf_match_port(psk->psk_dst.port_op, 1659 psk->psk_dst.port[0], psk->psk_dst.port[1], 1660 dstport)) && 1661 (!psk->psk_label[0] || (s->rule.ptr->label[0] && 1662 !strcmp(psk->psk_label, s->rule.ptr->label))) && 1663 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1664 s->kif->pfik_name))) { 1665 pf_remove_state(s); 1666 killed++; 1667 } 1668 } 1669 psk->psk_killed = killed; 1670 PF_STATE_EXIT_WRITE(); 1671 PF_UNLOCK(); 1672 break; 1673 } 1674 1675 #if NPFSYNC > 0 1676 case DIOCADDSTATE: { 1677 struct pfioc_state *ps = (struct pfioc_state *)addr; 1678 struct pfsync_state *sp = &ps->state; 1679 1680 if (sp->timeout >= PFTM_MAX) { 1681 error = EINVAL; 1682 break; 1683 } 1684 PF_LOCK(); 1685 PF_STATE_ENTER_WRITE(); 1686 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL); 1687 PF_STATE_EXIT_WRITE(); 1688 PF_UNLOCK(); 1689 break; 1690 } 1691 #endif /* NPFSYNC > 0 */ 1692 1693 case DIOCGETSTATE: { 1694 struct pfioc_state *ps = (struct pfioc_state *)addr; 1695 struct pf_state *s; 1696 struct pf_state_cmp id_key; 1697 1698 memset(&id_key, 0, sizeof(id_key)); 1699 id_key.id = ps->state.id; 1700 id_key.creatorid = ps->state.creatorid; 1701 1702 PF_STATE_ENTER_READ(); 1703 s = pf_find_state_byid(&id_key); 1704 s = pf_state_ref(s); 1705 PF_STATE_EXIT_READ(); 1706 if (s == NULL) { 1707 error = ENOENT; 1708 break; 1709 } 1710 1711 pf_state_export(&ps->state, s); 1712 pf_state_unref(s); 1713 break; 1714 } 1715 1716 case DIOCGETSTATES: { 1717 struct pfioc_states *ps = (struct pfioc_states *)addr; 1718 struct pf_state *state; 1719 struct pfsync_state *p, *pstore; 1720 u_int32_t nr = 0; 
1721 1722 if (ps->ps_len == 0) { 1723 nr = pf_status.states; 1724 ps->ps_len = sizeof(struct pfsync_state) * nr; 1725 break; 1726 } 1727 1728 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 1729 1730 p = ps->ps_states; 1731 1732 PF_STATE_ENTER_READ(); 1733 state = TAILQ_FIRST(&state_list); 1734 while (state) { 1735 if (state->timeout != PFTM_UNLINKED) { 1736 if ((nr+1) * sizeof(*p) > ps->ps_len) 1737 break; 1738 pf_state_export(pstore, state); 1739 error = copyout(pstore, p, sizeof(*p)); 1740 if (error) { 1741 free(pstore, M_TEMP, sizeof(*pstore)); 1742 PF_STATE_EXIT_READ(); 1743 goto fail; 1744 } 1745 p++; 1746 nr++; 1747 } 1748 state = TAILQ_NEXT(state, entry_list); 1749 } 1750 PF_STATE_EXIT_READ(); 1751 1752 ps->ps_len = sizeof(struct pfsync_state) * nr; 1753 1754 free(pstore, M_TEMP, sizeof(*pstore)); 1755 break; 1756 } 1757 1758 case DIOCGETSTATUS: { 1759 struct pf_status *s = (struct pf_status *)addr; 1760 PF_LOCK(); 1761 memcpy(s, &pf_status, sizeof(struct pf_status)); 1762 pfi_update_status(s->ifname, s); 1763 PF_UNLOCK(); 1764 break; 1765 } 1766 1767 case DIOCSETSTATUSIF: { 1768 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1769 1770 PF_LOCK(); 1771 if (pi->pfiio_name[0] == 0) { 1772 memset(pf_status.ifname, 0, IFNAMSIZ); 1773 PF_UNLOCK(); 1774 break; 1775 } 1776 strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ); 1777 pf_trans_set.mask |= PF_TSET_STATUSIF; 1778 PF_UNLOCK(); 1779 break; 1780 } 1781 1782 case DIOCCLRSTATUS: { 1783 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1784 1785 PF_LOCK(); 1786 /* if ifname is specified, clear counters there only */ 1787 if (pi->pfiio_name[0]) { 1788 pfi_update_status(pi->pfiio_name, NULL); 1789 PF_UNLOCK(); 1790 break; 1791 } 1792 1793 memset(pf_status.counters, 0, sizeof(pf_status.counters)); 1794 memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters)); 1795 memset(pf_status.scounters, 0, sizeof(pf_status.scounters)); 1796 pf_status.since = time_uptime; 1797 1798 PF_UNLOCK(); 1799 break; 
1800 } 1801 1802 case DIOCNATLOOK: { 1803 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1804 struct pf_state_key *sk; 1805 struct pf_state *state; 1806 struct pf_state_key_cmp key; 1807 int m = 0, direction = pnl->direction; 1808 int sidx, didx; 1809 1810 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 1811 sidx = (direction == PF_IN) ? 1 : 0; 1812 didx = (direction == PF_IN) ? 0 : 1; 1813 1814 if (!pnl->proto || 1815 PF_AZERO(&pnl->saddr, pnl->af) || 1816 PF_AZERO(&pnl->daddr, pnl->af) || 1817 ((pnl->proto == IPPROTO_TCP || 1818 pnl->proto == IPPROTO_UDP) && 1819 (!pnl->dport || !pnl->sport)) || 1820 pnl->rdomain > RT_TABLEID_MAX) 1821 error = EINVAL; 1822 else { 1823 key.af = pnl->af; 1824 key.proto = pnl->proto; 1825 key.rdomain = pnl->rdomain; 1826 pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af); 1827 key.port[sidx] = pnl->sport; 1828 pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af); 1829 key.port[didx] = pnl->dport; 1830 1831 PF_STATE_ENTER_READ(); 1832 state = pf_find_state_all(&key, direction, &m); 1833 state = pf_state_ref(state); 1834 PF_STATE_EXIT_READ(); 1835 1836 if (m > 1) 1837 error = E2BIG; /* more than one state */ 1838 else if (state != NULL) { 1839 sk = state->key[sidx]; 1840 pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx], 1841 sk->af); 1842 pnl->rsport = sk->port[sidx]; 1843 pf_addrcpy(&pnl->rdaddr, &sk->addr[didx], 1844 sk->af); 1845 pnl->rdport = sk->port[didx]; 1846 pnl->rrdomain = sk->rdomain; 1847 } else 1848 error = ENOENT; 1849 pf_state_unref(state); 1850 } 1851 break; 1852 } 1853 1854 case DIOCSETTIMEOUT: { 1855 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1856 1857 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1858 pt->seconds < 0) { 1859 error = EINVAL; 1860 goto fail; 1861 } 1862 PF_LOCK(); 1863 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 1864 pt->seconds = 1; 1865 pf_default_rule_new.timeout[pt->timeout] = pt->seconds; 1866 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1867 PF_UNLOCK(); 1868 
break; 1869 } 1870 1871 case DIOCGETTIMEOUT: { 1872 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1873 1874 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1875 error = EINVAL; 1876 goto fail; 1877 } 1878 PF_LOCK(); 1879 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1880 PF_UNLOCK(); 1881 break; 1882 } 1883 1884 case DIOCGETLIMIT: { 1885 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1886 1887 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1888 error = EINVAL; 1889 goto fail; 1890 } 1891 PF_LOCK(); 1892 pl->limit = pf_pool_limits[pl->index].limit; 1893 PF_UNLOCK(); 1894 break; 1895 } 1896 1897 case DIOCSETLIMIT: { 1898 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1899 1900 PF_LOCK(); 1901 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1902 pf_pool_limits[pl->index].pp == NULL) { 1903 error = EINVAL; 1904 PF_UNLOCK(); 1905 goto fail; 1906 } 1907 if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout > 1908 pl->limit) { 1909 error = EBUSY; 1910 PF_UNLOCK(); 1911 goto fail; 1912 } 1913 /* Fragments reference mbuf clusters. 
*/ 1914 if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) { 1915 error = EINVAL; 1916 PF_UNLOCK(); 1917 goto fail; 1918 } 1919 1920 pf_pool_limits[pl->index].limit_new = pl->limit; 1921 pl->limit = pf_pool_limits[pl->index].limit; 1922 PF_UNLOCK(); 1923 break; 1924 } 1925 1926 case DIOCSETDEBUG: { 1927 u_int32_t *level = (u_int32_t *)addr; 1928 1929 PF_LOCK(); 1930 pf_trans_set.debug = *level; 1931 pf_trans_set.mask |= PF_TSET_DEBUG; 1932 PF_UNLOCK(); 1933 break; 1934 } 1935 1936 case DIOCGETRULESETS: { 1937 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 1938 struct pf_ruleset *ruleset; 1939 struct pf_anchor *anchor; 1940 1941 PF_LOCK(); 1942 pr->path[sizeof(pr->path) - 1] = 0; 1943 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 1944 error = EINVAL; 1945 PF_UNLOCK(); 1946 break; 1947 } 1948 pr->nr = 0; 1949 if (ruleset == &pf_main_ruleset) { 1950 /* XXX kludge for pf_main_ruleset */ 1951 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 1952 if (anchor->parent == NULL) 1953 pr->nr++; 1954 } else { 1955 RB_FOREACH(anchor, pf_anchor_node, 1956 &ruleset->anchor->children) 1957 pr->nr++; 1958 } 1959 PF_UNLOCK(); 1960 break; 1961 } 1962 1963 case DIOCGETRULESET: { 1964 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 1965 struct pf_ruleset *ruleset; 1966 struct pf_anchor *anchor; 1967 u_int32_t nr = 0; 1968 1969 PF_LOCK(); 1970 pr->path[sizeof(pr->path) - 1] = 0; 1971 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 1972 error = EINVAL; 1973 PF_UNLOCK(); 1974 break; 1975 } 1976 pr->name[0] = 0; 1977 if (ruleset == &pf_main_ruleset) { 1978 /* XXX kludge for pf_main_ruleset */ 1979 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 1980 if (anchor->parent == NULL && nr++ == pr->nr) { 1981 strlcpy(pr->name, anchor->name, 1982 sizeof(pr->name)); 1983 PF_UNLOCK(); 1984 break; 1985 } 1986 } else { 1987 RB_FOREACH(anchor, pf_anchor_node, 1988 &ruleset->anchor->children) 1989 if (nr++ == pr->nr) { 1990 strlcpy(pr->name, anchor->name, 1991 
sizeof(pr->name)); 1992 PF_UNLOCK(); 1993 break; 1994 } 1995 } 1996 if (!pr->name[0]) 1997 error = EBUSY; 1998 PF_UNLOCK(); 1999 break; 2000 } 2001 2002 case DIOCRCLRTABLES: { 2003 struct pfioc_table *io = (struct pfioc_table *)addr; 2004 2005 if (io->pfrio_esize != 0) { 2006 error = ENODEV; 2007 break; 2008 } 2009 PF_LOCK(); 2010 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2011 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2012 PF_UNLOCK(); 2013 break; 2014 } 2015 2016 case DIOCRADDTABLES: { 2017 struct pfioc_table *io = (struct pfioc_table *)addr; 2018 2019 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2020 error = ENODEV; 2021 break; 2022 } 2023 PF_LOCK(); 2024 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2025 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2026 PF_UNLOCK(); 2027 break; 2028 } 2029 2030 case DIOCRDELTABLES: { 2031 struct pfioc_table *io = (struct pfioc_table *)addr; 2032 2033 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2034 error = ENODEV; 2035 break; 2036 } 2037 PF_LOCK(); 2038 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2039 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2040 PF_UNLOCK(); 2041 break; 2042 } 2043 2044 case DIOCRGETTABLES: { 2045 struct pfioc_table *io = (struct pfioc_table *)addr; 2046 2047 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2048 error = ENODEV; 2049 break; 2050 } 2051 PF_LOCK(); 2052 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2053 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2054 PF_UNLOCK(); 2055 break; 2056 } 2057 2058 case DIOCRGETTSTATS: { 2059 struct pfioc_table *io = (struct pfioc_table *)addr; 2060 2061 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2062 error = ENODEV; 2063 break; 2064 } 2065 PF_LOCK(); 2066 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2067 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2068 PF_UNLOCK(); 2069 break; 2070 } 2071 2072 case DIOCRCLRTSTATS: { 2073 struct 
pfioc_table *io = (struct pfioc_table *)addr; 2074 2075 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2076 error = ENODEV; 2077 break; 2078 } 2079 PF_LOCK(); 2080 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2081 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2082 PF_UNLOCK(); 2083 break; 2084 } 2085 2086 case DIOCRSETTFLAGS: { 2087 struct pfioc_table *io = (struct pfioc_table *)addr; 2088 2089 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2090 error = ENODEV; 2091 break; 2092 } 2093 PF_LOCK(); 2094 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2095 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2096 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2097 PF_UNLOCK(); 2098 break; 2099 } 2100 2101 case DIOCRCLRADDRS: { 2102 struct pfioc_table *io = (struct pfioc_table *)addr; 2103 2104 if (io->pfrio_esize != 0) { 2105 error = ENODEV; 2106 break; 2107 } 2108 PF_LOCK(); 2109 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2110 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2111 PF_UNLOCK(); 2112 break; 2113 } 2114 2115 case DIOCRADDADDRS: { 2116 struct pfioc_table *io = (struct pfioc_table *)addr; 2117 2118 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2119 error = ENODEV; 2120 break; 2121 } 2122 PF_LOCK(); 2123 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2124 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2125 PFR_FLAG_USERIOCTL); 2126 PF_UNLOCK(); 2127 break; 2128 } 2129 2130 case DIOCRDELADDRS: { 2131 struct pfioc_table *io = (struct pfioc_table *)addr; 2132 2133 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2134 error = ENODEV; 2135 break; 2136 } 2137 PF_LOCK(); 2138 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2139 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2140 PFR_FLAG_USERIOCTL); 2141 PF_UNLOCK(); 2142 break; 2143 } 2144 2145 case DIOCRSETADDRS: { 2146 struct pfioc_table *io = (struct pfioc_table *)addr; 2147 2148 if (io->pfrio_esize != sizeof(struct 
pfr_addr)) { 2149 error = ENODEV; 2150 break; 2151 } 2152 PF_LOCK(); 2153 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2154 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2155 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2156 PFR_FLAG_USERIOCTL, 0); 2157 PF_UNLOCK(); 2158 break; 2159 } 2160 2161 case DIOCRGETADDRS: { 2162 struct pfioc_table *io = (struct pfioc_table *)addr; 2163 2164 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2165 error = ENODEV; 2166 break; 2167 } 2168 PF_LOCK(); 2169 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2170 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2171 PF_UNLOCK(); 2172 break; 2173 } 2174 2175 case DIOCRGETASTATS: { 2176 struct pfioc_table *io = (struct pfioc_table *)addr; 2177 2178 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2179 error = ENODEV; 2180 break; 2181 } 2182 PF_LOCK(); 2183 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2184 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2185 PF_UNLOCK(); 2186 break; 2187 } 2188 2189 case DIOCRCLRASTATS: { 2190 struct pfioc_table *io = (struct pfioc_table *)addr; 2191 2192 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2193 error = ENODEV; 2194 break; 2195 } 2196 PF_LOCK(); 2197 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2198 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2199 PFR_FLAG_USERIOCTL); 2200 PF_UNLOCK(); 2201 break; 2202 } 2203 2204 case DIOCRTSTADDRS: { 2205 struct pfioc_table *io = (struct pfioc_table *)addr; 2206 2207 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2208 error = ENODEV; 2209 break; 2210 } 2211 PF_LOCK(); 2212 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2213 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2214 PFR_FLAG_USERIOCTL); 2215 PF_UNLOCK(); 2216 break; 2217 } 2218 2219 case DIOCRINADEFINE: { 2220 struct pfioc_table *io = (struct pfioc_table *)addr; 2221 2222 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2223 error = 
ENODEV; 2224 break; 2225 } 2226 PF_LOCK(); 2227 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2228 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2229 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2230 PF_UNLOCK(); 2231 break; 2232 } 2233 2234 case DIOCOSFPADD: { 2235 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2236 PF_LOCK(); 2237 error = pf_osfp_add(io); 2238 PF_UNLOCK(); 2239 break; 2240 } 2241 2242 case DIOCOSFPGET: { 2243 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2244 PF_LOCK(); 2245 error = pf_osfp_get(io); 2246 PF_UNLOCK(); 2247 break; 2248 } 2249 2250 case DIOCXBEGIN: { 2251 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2252 struct pfioc_trans_e *ioe; 2253 struct pfr_table *table; 2254 int i; 2255 2256 if (io->esize != sizeof(*ioe)) { 2257 error = ENODEV; 2258 goto fail; 2259 } 2260 PF_LOCK(); 2261 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2262 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2263 pf_default_rule_new = pf_default_rule; 2264 memset(&pf_trans_set, 0, sizeof(pf_trans_set)); 2265 for (i = 0; i < io->size; i++) { 2266 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2267 free(table, M_TEMP, sizeof(*table)); 2268 free(ioe, M_TEMP, sizeof(*ioe)); 2269 error = EFAULT; 2270 PF_UNLOCK(); 2271 goto fail; 2272 } 2273 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2274 sizeof(ioe->anchor)) { 2275 free(table, M_TEMP, sizeof(*table)); 2276 free(ioe, M_TEMP, sizeof(*ioe)); 2277 error = ENAMETOOLONG; 2278 PF_UNLOCK(); 2279 goto fail; 2280 } 2281 switch (ioe->type) { 2282 case PF_TRANS_TABLE: 2283 memset(table, 0, sizeof(*table)); 2284 strlcpy(table->pfrt_anchor, ioe->anchor, 2285 sizeof(table->pfrt_anchor)); 2286 if ((error = pfr_ina_begin(table, 2287 &ioe->ticket, NULL, 0))) { 2288 free(table, M_TEMP, sizeof(*table)); 2289 free(ioe, M_TEMP, sizeof(*ioe)); 2290 PF_UNLOCK(); 2291 goto fail; 2292 } 2293 break; 2294 case PF_TRANS_RULESET: 2295 if ((error = pf_begin_rules(&ioe->ticket, 2296 
ioe->anchor))) { 2297 free(table, M_TEMP, sizeof(*table)); 2298 free(ioe, M_TEMP, sizeof(*ioe)); 2299 PF_UNLOCK(); 2300 goto fail; 2301 } 2302 break; 2303 default: 2304 free(table, M_TEMP, sizeof(*table)); 2305 free(ioe, M_TEMP, sizeof(*ioe)); 2306 error = EINVAL; 2307 PF_UNLOCK(); 2308 goto fail; 2309 } 2310 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 2311 free(table, M_TEMP, sizeof(*table)); 2312 free(ioe, M_TEMP, sizeof(*ioe)); 2313 error = EFAULT; 2314 PF_UNLOCK(); 2315 goto fail; 2316 } 2317 } 2318 free(table, M_TEMP, sizeof(*table)); 2319 free(ioe, M_TEMP, sizeof(*ioe)); 2320 PF_UNLOCK(); 2321 break; 2322 } 2323 2324 case DIOCXROLLBACK: { 2325 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2326 struct pfioc_trans_e *ioe; 2327 struct pfr_table *table; 2328 int i; 2329 2330 if (io->esize != sizeof(*ioe)) { 2331 error = ENODEV; 2332 goto fail; 2333 } 2334 PF_LOCK(); 2335 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2336 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2337 for (i = 0; i < io->size; i++) { 2338 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2339 free(table, M_TEMP, sizeof(*table)); 2340 free(ioe, M_TEMP, sizeof(*ioe)); 2341 error = EFAULT; 2342 PF_UNLOCK(); 2343 goto fail; 2344 } 2345 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2346 sizeof(ioe->anchor)) { 2347 free(table, M_TEMP, sizeof(*table)); 2348 free(ioe, M_TEMP, sizeof(*ioe)); 2349 error = ENAMETOOLONG; 2350 PF_UNLOCK(); 2351 goto fail; 2352 } 2353 switch (ioe->type) { 2354 case PF_TRANS_TABLE: 2355 memset(table, 0, sizeof(*table)); 2356 strlcpy(table->pfrt_anchor, ioe->anchor, 2357 sizeof(table->pfrt_anchor)); 2358 if ((error = pfr_ina_rollback(table, 2359 ioe->ticket, NULL, 0))) { 2360 free(table, M_TEMP, sizeof(*table)); 2361 free(ioe, M_TEMP, sizeof(*ioe)); 2362 PF_UNLOCK(); 2363 goto fail; /* really bad */ 2364 } 2365 break; 2366 case PF_TRANS_RULESET: 2367 if ((error = pf_rollback_rules(ioe->ticket, 2368 ioe->anchor))) { 2369 free(table, M_TEMP, 
sizeof(*table)); 2370 free(ioe, M_TEMP, sizeof(*ioe)); 2371 PF_UNLOCK(); 2372 goto fail; /* really bad */ 2373 } 2374 break; 2375 default: 2376 free(table, M_TEMP, sizeof(*table)); 2377 free(ioe, M_TEMP, sizeof(*ioe)); 2378 error = EINVAL; 2379 PF_UNLOCK(); 2380 goto fail; /* really bad */ 2381 } 2382 } 2383 free(table, M_TEMP, sizeof(*table)); 2384 free(ioe, M_TEMP, sizeof(*ioe)); 2385 PF_UNLOCK(); 2386 break; 2387 } 2388 2389 case DIOCXCOMMIT: { 2390 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2391 struct pfioc_trans_e *ioe; 2392 struct pfr_table *table; 2393 struct pf_ruleset *rs; 2394 int i; 2395 2396 if (io->esize != sizeof(*ioe)) { 2397 error = ENODEV; 2398 goto fail; 2399 } 2400 PF_LOCK(); 2401 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2402 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2403 /* first makes sure everything will succeed */ 2404 for (i = 0; i < io->size; i++) { 2405 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2406 free(table, M_TEMP, sizeof(*table)); 2407 free(ioe, M_TEMP, sizeof(*ioe)); 2408 error = EFAULT; 2409 PF_UNLOCK(); 2410 goto fail; 2411 } 2412 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2413 sizeof(ioe->anchor)) { 2414 free(table, M_TEMP, sizeof(*table)); 2415 free(ioe, M_TEMP, sizeof(*ioe)); 2416 error = ENAMETOOLONG; 2417 PF_UNLOCK(); 2418 goto fail; 2419 } 2420 switch (ioe->type) { 2421 case PF_TRANS_TABLE: 2422 rs = pf_find_ruleset(ioe->anchor); 2423 if (rs == NULL || !rs->topen || ioe->ticket != 2424 rs->tticket) { 2425 free(table, M_TEMP, sizeof(*table)); 2426 free(ioe, M_TEMP, sizeof(*ioe)); 2427 error = EBUSY; 2428 PF_UNLOCK(); 2429 goto fail; 2430 } 2431 break; 2432 case PF_TRANS_RULESET: 2433 rs = pf_find_ruleset(ioe->anchor); 2434 if (rs == NULL || 2435 !rs->rules.inactive.open || 2436 rs->rules.inactive.ticket != 2437 ioe->ticket) { 2438 free(table, M_TEMP, sizeof(*table)); 2439 free(ioe, M_TEMP, sizeof(*ioe)); 2440 error = EBUSY; 2441 PF_UNLOCK(); 2442 goto fail; 2443 } 2444 break; 2445 default: 
2446 free(table, M_TEMP, sizeof(*table)); 2447 free(ioe, M_TEMP, sizeof(*ioe)); 2448 error = EINVAL; 2449 PF_UNLOCK(); 2450 goto fail; 2451 } 2452 } 2453 2454 /* 2455 * Checked already in DIOCSETLIMIT, but check again as the 2456 * situation might have changed. 2457 */ 2458 for (i = 0; i < PF_LIMIT_MAX; i++) { 2459 if (((struct pool *)pf_pool_limits[i].pp)->pr_nout > 2460 pf_pool_limits[i].limit_new) { 2461 free(table, M_TEMP, sizeof(*table)); 2462 free(ioe, M_TEMP, sizeof(*ioe)); 2463 error = EBUSY; 2464 PF_UNLOCK(); 2465 goto fail; 2466 } 2467 } 2468 /* now do the commit - no errors should happen here */ 2469 for (i = 0; i < io->size; i++) { 2470 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2471 free(table, M_TEMP, sizeof(*table)); 2472 free(ioe, M_TEMP, sizeof(*ioe)); 2473 error = EFAULT; 2474 PF_UNLOCK(); 2475 goto fail; 2476 } 2477 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2478 sizeof(ioe->anchor)) { 2479 free(table, M_TEMP, sizeof(*table)); 2480 free(ioe, M_TEMP, sizeof(*ioe)); 2481 error = ENAMETOOLONG; 2482 PF_UNLOCK(); 2483 goto fail; 2484 } 2485 switch (ioe->type) { 2486 case PF_TRANS_TABLE: 2487 memset(table, 0, sizeof(*table)); 2488 strlcpy(table->pfrt_anchor, ioe->anchor, 2489 sizeof(table->pfrt_anchor)); 2490 if ((error = pfr_ina_commit(table, ioe->ticket, 2491 NULL, NULL, 0))) { 2492 free(table, M_TEMP, sizeof(*table)); 2493 free(ioe, M_TEMP, sizeof(*ioe)); 2494 PF_UNLOCK(); 2495 goto fail; /* really bad */ 2496 } 2497 break; 2498 case PF_TRANS_RULESET: 2499 if ((error = pf_commit_rules(ioe->ticket, 2500 ioe->anchor))) { 2501 free(table, M_TEMP, sizeof(*table)); 2502 free(ioe, M_TEMP, sizeof(*ioe)); 2503 PF_UNLOCK(); 2504 goto fail; /* really bad */ 2505 } 2506 break; 2507 default: 2508 free(table, M_TEMP, sizeof(*table)); 2509 free(ioe, M_TEMP, sizeof(*ioe)); 2510 error = EINVAL; 2511 PF_UNLOCK(); 2512 goto fail; /* really bad */ 2513 } 2514 } 2515 for (i = 0; i < PF_LIMIT_MAX; i++) { 2516 if (pf_pool_limits[i].limit_new != 2517 
			    pf_pool_limits[i].limit &&
			    pool_sethardlimit(pf_pool_limits[i].pp,
			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EBUSY;
				PF_UNLOCK();
				goto fail; /* really bad */
			}
			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
		}
		/* Commit the staged default-rule timeouts. */
		for (i = 0; i < PFTM_MAX; i++) {
			int old = pf_default_rule.timeout[i];

			pf_default_rule.timeout[i] =
			    pf_default_rule_new.timeout[i];
			/*
			 * NOTE(review): this compares the new timeout *value*
			 * against the index constant PFTM_INTERVAL instead of
			 * checking the loop index; the intent is almost
			 * certainly "i == PFTM_INTERVAL && timeout shrank ->
			 * kick the purge task early".  As written, the purge
			 * task is only scheduled when the timeout value
			 * happens to equal PFTM_INTERVAL.  TODO confirm and
			 * change the first operand to "i".
			 */
			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
			    pf_default_rule.timeout[i] < old)
				task_add(net_tq(0), &pf_purge_task);
		}
		pfi_xcommit();
		pf_trans_set_commit();
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		PF_UNLOCK();
		break;
	}

	/*
	 * Copy the source-node table out to userland.  Two-phase protocol:
	 * psn_len == 0 means "tell me how much space I need"; otherwise copy
	 * as many nodes as fit and report the size actually used.
	 */
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *p, *pstore;
		u_int32_t nr = 0;
		size_t space = psn->psn_len;

		PF_LOCK();
		if (space == 0) {
			/* Sizing pass only: count nodes, report bytes. */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			PF_UNLOCK();
			break;
		}

		/* Bounce buffer so kernel pointers are scrubbed on copyout. */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int secs = time_uptime, diff;

			if ((nr + 1) * sizeof(*p) > psn->psn_len)
				break;

			memcpy(pstore, n, sizeof(*pstore));
			/* Do not leak kernel pointers to userland. */
			memset(&pstore->entry, 0, sizeof(pstore->entry));
			pstore->rule.ptr = NULL;
			pstore->kif = NULL;
			pstore->rule.nr = n->rule.ptr->nr;
			/* Convert absolute uptime stamps to relative ages. */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				free(pstore, M_TEMP, sizeof(*pstore));
				PF_UNLOCK();
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		free(pstore, M_TEMP, sizeof(*pstore));
		PF_UNLOCK();
		break;
	}

	/*
	 * Expire all source nodes: detach them from their states, mark
	 * every node expired and run the purge immediately.
	 */
	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		RB_FOREACH(state, pf_state_tree_id, &tree_id)
			pf_src_tree_remove_state(state);
		PF_STATE_EXIT_WRITE();
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			n->expire = 1;
		pf_purge_expired_src_nodes();
		PF_UNLOCK();
		break;
	}

	/*
	 * Expire the source nodes whose src/dst addresses match the
	 * (optionally negated) masks supplied by userland; report how
	 * many were killed.
	 */
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		u_int killed = 0;

		PF_LOCK();
		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (pf_match_addr(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    pf_match_addr(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					PF_ASSERT_LOCKED();
					PF_STATE_ENTER_WRITE();
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id)
						pf_state_rm_src_node(s, sn);
					PF_STATE_EXIT_WRITE();
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		psnk->psnk_killed = killed;
		PF_UNLOCK();
		break;
	}

	/*
	 * Stage a new hostid (0 = pick a random one); applied on the next
	 * pf_trans_set_commit().
	 */
	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_LOCK();
		if (*hostid == 0)
			pf_trans_set.hostid = arc4random();
		else
			pf_trans_set.hostid = *hostid;
		pf_trans_set.mask |= PF_TSET_HOSTID;
		PF_UNLOCK();
		break;
	}

	/* Flush the passive OS fingerprint table. */
	case DIOCOSFPFLUSH:
		PF_LOCK();
		pf_osfp_flush();
		PF_UNLOCK();
		break;

	/* Copy the interface list (pfi_kif records) out to userland. */
	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		PF_LOCK();
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		PF_UNLOCK();
		break;
	}

	/* Set user-settable flags (e.g. skip) on an interface. */
	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_LOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_UNLOCK();
		break;
	}

	/* Clear user-settable flags on an interface. */
	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_LOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_UNLOCK();
		break;
	}

	/* Stage the reassembly setting; applied by pf_trans_set_commit(). */
	case DIOCSETREASS: {
		u_int32_t *reass = (u_int32_t *)addr;

		PF_LOCK();
		pf_trans_set.reass = *reass;
		pf_trans_set.mask |= PF_TSET_REASS;
		PF_UNLOCK();
		break;
	}

	/* Set the syncookie high/low watermarks. */
	case DIOCSETSYNFLWATS: {
		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;

		PF_LOCK();
		error = pf_syncookies_setwats(io->hiwat, io->lowat);
		PF_UNLOCK();
		break;
	}

	/* Read back the syncookie watermarks. */
	case DIOCGETSYNFLWATS: {
		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;

		PF_LOCK();
		error = pf_syncookies_getwats(io);
		PF_UNLOCK();
		break;
	}

	/* Switch the syncookie mode (never/always/adaptive). */
	case DIOCSETSYNCOOKIES: {
		u_int8_t *mode = (u_int8_t *)addr;

		PF_LOCK();
		error = pf_syncookies_setmode(*mode);
		PF_UNLOCK();
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	NET_UNLOCK();
	return (error);
}

/*
 * Apply the option values staged in pf_trans_set to the live pf_status.
 * Only fields whose PF_TSET_* bit is set in pf_trans_set.mask are copied,
 * so unrelated staged transactions do not clobber each other.
 */
void
pf_trans_set_commit(void)
{
	if (pf_trans_set.mask & PF_TSET_STATUSIF)
		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
	if (pf_trans_set.mask & PF_TSET_DEBUG)
		pf_status.debug = pf_trans_set.debug;
	if (pf_trans_set.mask & PF_TSET_HOSTID)
		pf_status.hostid = pf_trans_set.hostid;
	if (pf_trans_set.mask & PF_TSET_REASS)
		pf_status.reass = pf_trans_set.reass;
}

/*
 * Copy a pool structure wholesale, then reset the kernel-only kif pointer;
 * callers re-resolve it from the pool's interface name via pf_kif_setup()
 * (see pf_rule_copyin() below).
 */
void
pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
{
	memmove(to, from, sizeof(*to));
	to->kif = NULL;
}

/*
 * Copy a rule supplied by userland ('from') into a kernel rule ('to'),
 * field by field, resolving all names to kernel objects on the way:
 * interfaces (pf_kif_setup), overload table (pfr_attach_table), queues
 * (pf_qname2qid) and tags (pf_tagname2tag).  Routing-table ids are
 * validated against the live rtables.
 *
 * Returns 0 on success, EINVAL when a name cannot be resolved, EBUSY
 * when an rtable is missing or a queue/tag lookup fails.  NOTE(review):
 * on error the rule may be partially initialized (some references
 * already taken) — presumably the caller's teardown path releases them;
 * verify against the callers of pf_rule_copyin().
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	to->src = from->src;
	to->dst = from->dst;

	/* XXX union skip[] */

	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* Pool copies clear their kif pointers; re-resolved below. */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);
	if (to->overload_tblname[0]) {
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain))
		return (EBUSY);
	if (to->onrdomain >= 0)		/* make sure it is a real rdomain */
		to->onrdomain = rtable_l2(to->onrdomain);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
	pf_init_threshold(&to->pktrate, from->pktrate.limit,
	    from->pktrate.seconds);

	/* Resolve queue names; child queue defaults to the parent's qid. */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* Without pflog, a logif is meaningless unless logging is on. */
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert.type = from->divert.type;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}

/*
 * sysctl handler: snapshot pf_status under the net/pf locks, refresh the
 * per-interface counters into the snapshot, and hand the copy to
 * sysctl_rdstruct() (read-only) outside the locks.
 */
int
pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	struct pf_status	pfs;

	NET_RLOCK();
	PF_LOCK();
	memcpy(&pfs, &pf_status, sizeof(struct pf_status));
	pfi_update_status(pfs.ifname, &pfs);
	PF_UNLOCK();
	NET_RUNLOCK();

	return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
}