1 /* $OpenBSD: pf_ioctl.c,v 1.415 2023/07/06 04:55:05 dlg Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 35 * 36 */ 37 38 #include "pfsync.h" 39 #include "pflog.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/sysctl.h> 44 #include <sys/mbuf.h> 45 #include <sys/filio.h> 46 #include <sys/fcntl.h> 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/kernel.h> 50 #include <sys/time.h> 51 #include <sys/timeout.h> 52 #include <sys/pool.h> 53 #include <sys/malloc.h> 54 #include <sys/proc.h> 55 #include <sys/rwlock.h> 56 #include <sys/syslog.h> 57 #include <sys/specdev.h> 58 #include <uvm/uvm_extern.h> 59 60 #include <crypto/md5.h> 61 62 #include <net/if.h> 63 #include <net/if_var.h> 64 #include <net/route.h> 65 #include <net/hfsc.h> 66 #include <net/fq_codel.h> 67 68 #include <netinet/in.h> 69 #include <netinet/ip.h> 70 #include <netinet/in_pcb.h> 71 #include <netinet/ip_var.h> 72 #include <netinet/ip_icmp.h> 73 #include <netinet/tcp.h> 74 #include <netinet/udp.h> 75 76 #ifdef INET6 77 #include <netinet/ip6.h> 78 #include <netinet/icmp6.h> 79 #endif /* INET6 */ 80 81 #include <net/pfvar.h> 82 #include <net/pfvar_priv.h> 83 84 #if NPFSYNC > 0 85 #include <netinet/ip_ipsp.h> 86 #include <net/if_pfsync.h> 87 #endif /* NPFSYNC > 0 */ 88 89 struct pool pf_tag_pl; 90 91 void pfattach(int); 92 void pf_thread_create(void *); 93 int pfopen(dev_t, int, int, struct proc *); 94 int pfclose(dev_t, int, int, struct proc *); 95 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *); 96 int pf_begin_rules(u_int32_t *, const char *); 97 void pf_rollback_rules(u_int32_t, char *); 98 void pf_remove_queues(void); 99 int pf_commit_queues(void); 100 void pf_free_queues(struct pf_queuehead *); 101 void pf_calc_chksum(struct pf_ruleset *); 102 void pf_hash_rule(MD5_CTX *, struct pf_rule *); 103 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 104 int pf_commit_rules(u_int32_t, char *); 105 int pf_addr_setup(struct pf_ruleset *, 106 struct pf_addr_wrap *, sa_family_t); 107 struct pfi_kif *pf_kif_setup(struct pfi_kif *); 108 void pf_addr_copyout(struct pf_addr_wrap *); 109 void pf_trans_set_commit(void); 110 void pf_pool_copyin(struct pf_pool *, struct pf_pool *); 111 int pf_validate_range(u_int8_t, u_int16_t[2], int); 112 int pf_rule_copyin(struct pf_rule *, struct pf_rule *); 113 int pf_rule_checkaf(struct pf_rule *); 114 u_int16_t pf_qname2qid(char *, int); 115 void pf_qid2qname(u_int16_t, char *); 116 void pf_qid_unref(u_int16_t); 117 int pf_states_clr(struct pfioc_state_kill *); 118 int pf_states_get(struct pfioc_states *); 119 120 struct pf_trans *pf_open_trans(uint32_t); 121 struct pf_trans *pf_find_trans(uint32_t, uint64_t); 122 void pf_free_trans(struct pf_trans *); 123 void pf_rollback_trans(struct pf_trans *); 124 125 void pf_init_tgetrule(struct pf_trans *, 126 struct pf_anchor *, uint32_t, struct pf_rule *); 127 void pf_cleanup_tgetrule(struct pf_trans *t); 128 129 struct pf_rule pf_default_rule, pf_default_rule_new; 130 131 struct { 132 char statusif[IFNAMSIZ]; 133 u_int32_t debug; 134 u_int32_t hostid; 135 u_int32_t reass; 136 u_int32_t mask; 137 } pf_trans_set; 138 139 #define PF_ORDER_HOST 0 140 #define PF_ORDER_NET 1 141 142 #define PF_TSET_STATUSIF 0x01 143 #define PF_TSET_DEBUG 0x02 144 #define PF_TSET_HOSTID 0x04 145 #define PF_TSET_REASS 0x08 146 147 #define TAGID_MAX 50000 148 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags), 149 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids); 150 151 /* 152 * pf_lock protects consistency of PF data structures, which don't have 153 * their dedicated lock yet. The pf_lock currently protects: 154 * - rules, 155 * - radix tables, 156 * - source nodes 157 * All callers must grab pf_lock exclusively. 158 * 159 * pf_state_lock protects consistency of state table. Packets, which do state 160 * look up grab the lock as readers. If packet must create state, then it must 161 * grab the lock as writer. Whenever packet creates state it grabs pf_lock 162 * first then it locks pf_state_lock as the writer. 163 */ 164 struct rwlock pf_lock = RWLOCK_INITIALIZER("pf_lock"); 165 struct rwlock pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock"); 166 struct rwlock pfioctl_rw = RWLOCK_INITIALIZER("pfioctl_rw"); 167 168 struct cpumem *pf_anchor_stack; 169 170 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 171 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 172 #endif 173 u_int16_t tagname2tag(struct pf_tags *, char *, int); 174 void tag2tagname(struct pf_tags *, u_int16_t, char *); 175 void tag_unref(struct pf_tags *, u_int16_t); 176 int pf_rtlabel_add(struct pf_addr_wrap *); 177 void pf_rtlabel_remove(struct pf_addr_wrap *); 178 void pf_rtlabel_copyout(struct pf_addr_wrap *); 179 180 LIST_HEAD(, pf_trans) pf_ioctl_trans = LIST_HEAD_INITIALIZER(pf_trans); 181 182 /* counts transactions opened by a device */ 183 unsigned int pf_tcount[CLONE_MAPSZ * NBBY]; 184 #define pf_unit2idx(_unit_) ((_unit_) >> CLONE_SHIFT) 185 186 void 187 pfattach(int num) 188 { 189 u_int32_t *timeout = pf_default_rule.timeout; 190 struct pf_anchor_stackframe *sf; 191 struct cpumem_iter cmi; 192 193 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 194 IPL_SOFTNET, 0, "pfrule", NULL); 195 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 196 IPL_SOFTNET, 0, "pfsrctr", NULL); 197 pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0, 198 IPL_SOFTNET, 0, "pfsnitem", NULL); 199 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 200 IPL_SOFTNET, 0, "pfstate", NULL); 201 pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 202 IPL_SOFTNET, 0, "pfstkey", NULL); 203 pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 204 IPL_SOFTNET, 0, "pfstitem", NULL); 205 pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 206 IPL_SOFTNET, 0, "pfruleitem", NULL); 207 pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0, 208 IPL_SOFTNET, 0, "pfqueue", NULL); 209 pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0, 210 IPL_SOFTNET, 0, "pftag", NULL); 211 pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0, 212 IPL_SOFTNET, 0, "pfpktdelay", NULL); 213 pool_init(&pf_anchor_pl, sizeof(struct pf_anchor), 0, 214 IPL_SOFTNET, 0, "pfanchor", NULL); 215 216 hfsc_initialize(); 217 pfr_initialize(); 218 pfi_initialize(); 219 pf_osfp_initialize(); 220 pf_syncookies_init(); 221 222 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp, 223 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0); 224 pool_sethardlimit(pf_pool_limits[PF_LIMIT_ANCHORS].pp, 225 pf_pool_limits[PF_LIMIT_ANCHORS].limit, NULL, 0); 226 227 if (physmem <= atop(100*1024*1024)) 228 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = 229 PFR_KENTRY_HIWAT_SMALL; 230 231 RB_INIT(&tree_src_tracking); 232 RB_INIT(&pf_anchors); 233 pf_init_ruleset(&pf_main_ruleset); 234 TAILQ_INIT(&pf_queues[0]); 235 TAILQ_INIT(&pf_queues[1]); 236 pf_queues_active = &pf_queues[0]; 237 pf_queues_inactive = &pf_queues[1]; 238 239 /* default rule should never be garbage collected */ 240 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; 241 pf_default_rule.action = PF_PASS; 242 pf_default_rule.nr = (u_int32_t)-1; 243 pf_default_rule.rtableid = -1; 244 245 /* initialize default timeouts */ 246 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 247 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 248 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 249 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 250 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 251 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 252 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 253 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 254 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 255 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 256 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 257 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 258 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 259 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 260 timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 261 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 262 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 263 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 264 timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 265 timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 266 267 pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK; 268 pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK; 269 pf_default_rule.rdr.addr.type = PF_ADDR_NONE; 270 pf_default_rule.nat.addr.type = PF_ADDR_NONE; 271 pf_default_rule.route.addr.type = PF_ADDR_NONE; 272 273 pf_normalize_init(); 274 memset(&pf_status, 0, sizeof(pf_status)); 275 pf_status.debug = LOG_ERR; 276 pf_status.reass = PF_REASS_ENABLED; 277 278 /* XXX do our best to avoid a conflict */ 279 pf_status.hostid = arc4random(); 280 281 pf_default_rule_new = pf_default_rule; 282 283 /* 284 * we waste two stack frames as meta-data. 285 * frame[0] always presents a top, which can not be used for data 286 * frame[PF_ANCHOR_STACK_MAX] denotes a bottom of the stack and keeps 287 * the pointer to currently used stack frame. 288 */ 289 pf_anchor_stack = cpumem_malloc( 290 sizeof(struct pf_anchor_stackframe) * (PF_ANCHOR_STACK_MAX + 2), 291 M_WAITOK|M_ZERO); 292 CPUMEM_FOREACH(sf, &cmi, pf_anchor_stack) 293 sf[PF_ANCHOR_STACK_MAX].sf_stack_top = &sf[0]; 294 } 295 296 int 297 pfopen(dev_t dev, int flags, int fmt, struct proc *p) 298 { 299 int unit = minor(dev); 300 301 if (unit & ((1 << CLONE_SHIFT) - 1)) 302 return (ENXIO); 303 304 return (0); 305 } 306 307 int 308 pfclose(dev_t dev, int flags, int fmt, struct proc *p) 309 { 310 struct pf_trans *w, *s; 311 LIST_HEAD(, pf_trans) tmp_list; 312 uint32_t unit = minor(dev); 313 314 LIST_INIT(&tmp_list); 315 rw_enter_write(&pfioctl_rw); 316 LIST_FOREACH_SAFE(w, &pf_ioctl_trans, pft_entry, s) { 317 if (w->pft_unit == unit) { 318 LIST_REMOVE(w, pft_entry); 319 LIST_INSERT_HEAD(&tmp_list, w, pft_entry); 320 } 321 } 322 rw_exit_write(&pfioctl_rw); 323 324 while ((w = LIST_FIRST(&tmp_list)) != NULL) { 325 LIST_REMOVE(w, pft_entry); 326 pf_free_trans(w); 327 } 328 329 return (0); 330 } 331 332 void 333 pf_rule_free(struct pf_rule *rule) 334 { 335 if (rule == NULL) 336 return; 337 338 pfi_kif_free(rule->kif); 339 pfi_kif_free(rule->rcv_kif); 340 pfi_kif_free(rule->rdr.kif); 341 pfi_kif_free(rule->nat.kif); 342 pfi_kif_free(rule->route.kif); 343 344 pool_put(&pf_rule_pl, rule); 345 } 346 347 void 348 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) 349 { 350 if (rulequeue != NULL) { 351 if (rule->states_cur == 0 && rule->src_nodes == 0) { 352 /* 353 * XXX - we need to remove the table *before* detaching 354 * the rule to make sure the table code does not delete 355 * the anchor under our feet. 356 */ 357 pf_tbladdr_remove(&rule->src.addr); 358 pf_tbladdr_remove(&rule->dst.addr); 359 pf_tbladdr_remove(&rule->rdr.addr); 360 pf_tbladdr_remove(&rule->nat.addr); 361 pf_tbladdr_remove(&rule->route.addr); 362 if (rule->overload_tbl) 363 pfr_detach_table(rule->overload_tbl); 364 } 365 TAILQ_REMOVE(rulequeue, rule, entries); 366 rule->entries.tqe_prev = NULL; 367 rule->nr = (u_int32_t)-1; 368 } 369 370 if (rule->states_cur > 0 || rule->src_nodes > 0 || 371 rule->entries.tqe_prev != NULL) 372 return; 373 pf_tag_unref(rule->tag); 374 pf_tag_unref(rule->match_tag); 375 pf_rtlabel_remove(&rule->src.addr); 376 pf_rtlabel_remove(&rule->dst.addr); 377 pfi_dynaddr_remove(&rule->src.addr); 378 pfi_dynaddr_remove(&rule->dst.addr); 379 pfi_dynaddr_remove(&rule->rdr.addr); 380 pfi_dynaddr_remove(&rule->nat.addr); 381 pfi_dynaddr_remove(&rule->route.addr); 382 if (rulequeue == NULL) { 383 pf_tbladdr_remove(&rule->src.addr); 384 pf_tbladdr_remove(&rule->dst.addr); 385 pf_tbladdr_remove(&rule->rdr.addr); 386 pf_tbladdr_remove(&rule->nat.addr); 387 pf_tbladdr_remove(&rule->route.addr); 388 if (rule->overload_tbl) 389 pfr_detach_table(rule->overload_tbl); 390 } 391 pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE); 392 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE); 393 pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE); 394 pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE); 395 pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE); 396 pf_remove_anchor(rule); 397 pool_put(&pf_rule_pl, rule); 398 } 399 400 u_int16_t 401 tagname2tag(struct pf_tags *head, char *tagname, int create) 402 { 403 struct pf_tagname *tag, *p = NULL; 404 u_int16_t new_tagid = 1; 405 406 TAILQ_FOREACH(tag, head, entries) 407 if (strcmp(tagname, tag->name) == 0) { 408 tag->ref++; 409 return (tag->tag); 410 } 411 412 if (!create) 413 return (0); 414 415 /* 416 * to avoid fragmentation, we do a linear search from the beginning 417 * and take the first free slot we find. if there is none or the list 418 * is empty, append a new entry at the end. 419 */ 420 421 /* new entry */ 422 TAILQ_FOREACH(p, head, entries) { 423 if (p->tag != new_tagid) 424 break; 425 new_tagid = p->tag + 1; 426 } 427 428 if (new_tagid > TAGID_MAX) 429 return (0); 430 431 /* allocate and fill new struct pf_tagname */ 432 tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO); 433 if (tag == NULL) 434 return (0); 435 strlcpy(tag->name, tagname, sizeof(tag->name)); 436 tag->tag = new_tagid; 437 tag->ref++; 438 439 if (p != NULL) /* insert new entry before p */ 440 TAILQ_INSERT_BEFORE(p, tag, entries); 441 else /* either list empty or no free slot in between */ 442 TAILQ_INSERT_TAIL(head, tag, entries); 443 444 return (tag->tag); 445 } 446 447 void 448 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) 449 { 450 struct pf_tagname *tag; 451 452 TAILQ_FOREACH(tag, head, entries) 453 if (tag->tag == tagid) { 454 strlcpy(p, tag->name, PF_TAG_NAME_SIZE); 455 return; 456 } 457 } 458 459 void 460 tag_unref(struct pf_tags *head, u_int16_t tag) 461 { 462 struct pf_tagname *p, *next; 463 464 if (tag == 0) 465 return; 466 467 TAILQ_FOREACH_SAFE(p, head, entries, next) { 468 if (tag == p->tag) { 469 if (--p->ref == 0) { 470 TAILQ_REMOVE(head, p, entries); 471 pool_put(&pf_tag_pl, p); 472 } 473 break; 474 } 475 } 476 } 477 478 u_int16_t 479 pf_tagname2tag(char *tagname, int create) 480 { 481 return (tagname2tag(&pf_tags, tagname, create)); 482 } 483 484 void 485 pf_tag2tagname(u_int16_t tagid, char *p) 486 { 487 tag2tagname(&pf_tags, tagid, p); 488 } 489 490 void 491 pf_tag_ref(u_int16_t tag) 492 { 493 struct pf_tagname *t; 494 495 TAILQ_FOREACH(t, &pf_tags, entries) 496 if (t->tag == tag) 497 break; 498 if (t != NULL) 499 t->ref++; 500 } 501 502 void 503 pf_tag_unref(u_int16_t tag) 504 { 505 tag_unref(&pf_tags, tag); 506 } 507 508 int 509 pf_rtlabel_add(struct pf_addr_wrap *a) 510 { 511 if (a->type == PF_ADDR_RTLABEL && 512 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0) 513 return (-1); 514 return (0); 515 } 516 517 void 518 pf_rtlabel_remove(struct pf_addr_wrap *a) 519 { 520 if (a->type == PF_ADDR_RTLABEL) 521 rtlabel_unref(a->v.rtlabel); 522 } 523 524 void 525 pf_rtlabel_copyout(struct pf_addr_wrap *a) 526 { 527 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) { 528 if (rtlabel_id2name(a->v.rtlabel, a->v.rtlabelname, 529 sizeof(a->v.rtlabelname)) == NULL) 530 strlcpy(a->v.rtlabelname, "?", 531 sizeof(a->v.rtlabelname)); 532 } 533 } 534 535 u_int16_t 536 pf_qname2qid(char *qname, int create) 537 { 538 return (tagname2tag(&pf_qids, qname, create)); 539 } 540 541 void 542 pf_qid2qname(u_int16_t qid, char *p) 543 { 544 tag2tagname(&pf_qids, qid, p); 545 } 546 547 void 548 pf_qid_unref(u_int16_t qid) 549 { 550 tag_unref(&pf_qids, (u_int16_t)qid); 551 } 552 553 int 554 pf_begin_rules(u_int32_t *version, const char *anchor) 555 { 556 struct pf_ruleset *rs; 557 struct pf_rule *rule; 558 559 if ((rs = pf_find_or_create_ruleset(anchor)) == NULL) 560 return (EINVAL); 561 while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) { 562 pf_rm_rule(rs->rules.inactive.ptr, rule); 563 rs->rules.inactive.rcount--; 564 } 565 *version = ++rs->rules.inactive.version; 566 rs->rules.inactive.open = 1; 567 return (0); 568 } 569 570 void 571 pf_rollback_rules(u_int32_t version, char *anchor) 572 { 573 struct pf_ruleset *rs; 574 struct pf_rule *rule; 575 576 rs = pf_find_ruleset(anchor); 577 if (rs == NULL || !rs->rules.inactive.open || 578 rs->rules.inactive.version != version) 579 return; 580 while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) { 581 pf_rm_rule(rs->rules.inactive.ptr, rule); 582 rs->rules.inactive.rcount--; 583 } 584 rs->rules.inactive.open = 0; 585 586 /* queue defs only in the main ruleset */ 587 if (anchor[0]) 588 return; 589 590 pf_free_queues(pf_queues_inactive); 591 } 592 593 void 594 pf_free_queues(struct pf_queuehead *where) 595 { 596 struct pf_queuespec *q, *qtmp; 597 598 TAILQ_FOREACH_SAFE(q, where, entries, qtmp) { 599 TAILQ_REMOVE(where, q, entries); 600 pfi_kif_unref(q->kif, PFI_KIF_REF_RULE); 601 pool_put(&pf_queue_pl, q); 602 } 603 } 604 605 void 606 pf_remove_queues(void) 607 { 608 struct pf_queuespec *q; 609 struct ifnet *ifp; 610 611 /* put back interfaces in normal queueing mode */ 612 TAILQ_FOREACH(q, pf_queues_active, entries) { 613 if (q->parent_qid != 0) 614 continue; 615 616 ifp = q->kif->pfik_ifp; 617 if (ifp == NULL) 618 continue; 619 620 ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL); 621 } 622 } 623 624 struct pf_queue_if { 625 struct ifnet *ifp; 626 const struct ifq_ops *ifqops; 627 const struct pfq_ops *pfqops; 628 void *disc; 629 struct pf_queue_if *next; 630 }; 631 632 static inline struct pf_queue_if * 633 pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp) 634 { 635 struct pf_queue_if *qif = list; 636 637 while (qif != NULL) { 638 if (qif->ifp == ifp) 639 return (qif); 640 641 qif = qif->next; 642 } 643 644 return (qif); 645 } 646 647 int 648 pf_create_queues(void) 649 { 650 struct pf_queuespec *q; 651 struct ifnet *ifp; 652 struct pf_queue_if *list = NULL, *qif; 653 int error; 654 655 /* 656 * Find root queues and allocate traffic conditioner 657 * private data for these interfaces 658 */ 659 TAILQ_FOREACH(q, pf_queues_active, entries) { 660 if (q->parent_qid != 0) 661 continue; 662 663 ifp = q->kif->pfik_ifp; 664 if (ifp == NULL) 665 continue; 666 667 qif = malloc(sizeof(*qif), M_PF, M_WAITOK); 668 qif->ifp = ifp; 669 670 if (q->flags & PFQS_ROOTCLASS) { 671 qif->ifqops = ifq_hfsc_ops; 672 qif->pfqops = pfq_hfsc_ops; 673 } else { 674 qif->ifqops = ifq_fqcodel_ops; 675 qif->pfqops = pfq_fqcodel_ops; 676 } 677 678 qif->disc = qif->pfqops->pfq_alloc(ifp); 679 680 qif->next = list; 681 list = qif; 682 } 683 684 /* and now everything */ 685 TAILQ_FOREACH(q, pf_queues_active, entries) { 686 ifp = q->kif->pfik_ifp; 687 if (ifp == NULL) 688 continue; 689 690 qif = pf_ifp2q(list, ifp); 691 KASSERT(qif != NULL); 692 693 error = qif->pfqops->pfq_addqueue(qif->disc, q); 694 if (error != 0) 695 goto error; 696 } 697 698 /* find root queues in old list to disable them if necessary */ 699 TAILQ_FOREACH(q, pf_queues_inactive, entries) { 700 if (q->parent_qid != 0) 701 continue; 702 703 ifp = q->kif->pfik_ifp; 704 if (ifp == NULL) 705 continue; 706 707 qif = pf_ifp2q(list, ifp); 708 if (qif != NULL) 709 continue; 710 711 ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL); 712 } 713 714 /* commit the new queues */ 715 while (list != NULL) { 716 qif = list; 717 list = qif->next; 718 719 ifp = qif->ifp; 720 721 ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc); 722 free(qif, M_PF, sizeof(*qif)); 723 } 724 725 return (0); 726 727 error: 728 while (list != NULL) { 729 qif = list; 730 list = qif->next; 731 732 qif->pfqops->pfq_free(qif->disc); 733 free(qif, M_PF, sizeof(*qif)); 734 } 735 736 return (error); 737 } 738 739 int 740 pf_commit_queues(void) 741 { 742 struct pf_queuehead *qswap; 743 int error; 744 745 /* swap */ 746 qswap = pf_queues_active; 747 pf_queues_active = pf_queues_inactive; 748 pf_queues_inactive = qswap; 749 750 error = pf_create_queues(); 751 if (error != 0) { 752 pf_queues_inactive = pf_queues_active; 753 pf_queues_active = qswap; 754 return (error); 755 } 756 757 pf_free_queues(pf_queues_inactive); 758 759 return (0); 760 } 761 762 const struct pfq_ops * 763 pf_queue_manager(struct pf_queuespec *q) 764 { 765 if (q->flags & PFQS_FLOWQUEUE) 766 return pfq_fqcodel_ops; 767 return (/* pfq_default_ops */ NULL); 768 } 769 770 #define PF_MD5_UPD(st, elm) \ 771 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) 772 773 #define PF_MD5_UPD_STR(st, elm) \ 774 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) 775 776 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ 777 (stor) = htonl((st)->elm); \ 778 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ 779 } while (0) 780 781 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ 782 (stor) = htons((st)->elm); \ 783 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ 784 } while (0) 785 786 void 787 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) 788 { 789 PF_MD5_UPD(pfr, addr.type); 790 switch (pfr->addr.type) { 791 case PF_ADDR_DYNIFTL: 792 PF_MD5_UPD(pfr, addr.v.ifname); 793 PF_MD5_UPD(pfr, addr.iflags); 794 break; 795 case PF_ADDR_TABLE: 796 if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX, 797 strlen(PF_OPTIMIZER_TABLE_PFX))) 798 PF_MD5_UPD(pfr, addr.v.tblname); 799 break; 800 case PF_ADDR_ADDRMASK: 801 /* XXX ignore af? */ 802 PF_MD5_UPD(pfr, addr.v.a.addr.addr32); 803 PF_MD5_UPD(pfr, addr.v.a.mask.addr32); 804 break; 805 case PF_ADDR_RTLABEL: 806 PF_MD5_UPD(pfr, addr.v.rtlabelname); 807 break; 808 } 809 810 PF_MD5_UPD(pfr, port[0]); 811 PF_MD5_UPD(pfr, port[1]); 812 PF_MD5_UPD(pfr, neg); 813 PF_MD5_UPD(pfr, port_op); 814 } 815 816 void 817 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule) 818 { 819 u_int16_t x; 820 u_int32_t y; 821 822 pf_hash_rule_addr(ctx, &rule->src); 823 pf_hash_rule_addr(ctx, &rule->dst); 824 PF_MD5_UPD_STR(rule, label); 825 PF_MD5_UPD_STR(rule, ifname); 826 PF_MD5_UPD_STR(rule, rcv_ifname); 827 PF_MD5_UPD_STR(rule, match_tagname); 828 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */ 829 PF_MD5_UPD_HTONL(rule, os_fingerprint, y); 830 PF_MD5_UPD_HTONL(rule, prob, y); 831 PF_MD5_UPD_HTONL(rule, uid.uid[0], y); 832 PF_MD5_UPD_HTONL(rule, uid.uid[1], y); 833 PF_MD5_UPD(rule, uid.op); 834 PF_MD5_UPD_HTONL(rule, gid.gid[0], y); 835 PF_MD5_UPD_HTONL(rule, gid.gid[1], y); 836 PF_MD5_UPD(rule, gid.op); 837 PF_MD5_UPD_HTONL(rule, rule_flag, y); 838 PF_MD5_UPD(rule, action); 839 PF_MD5_UPD(rule, direction); 840 PF_MD5_UPD(rule, af); 841 PF_MD5_UPD(rule, quick); 842 PF_MD5_UPD(rule, ifnot); 843 PF_MD5_UPD(rule, rcvifnot); 844 PF_MD5_UPD(rule, match_tag_not); 845 PF_MD5_UPD(rule, keep_state); 846 PF_MD5_UPD(rule, proto); 847 PF_MD5_UPD(rule, type); 848 PF_MD5_UPD(rule, code); 849 PF_MD5_UPD(rule, flags); 850 PF_MD5_UPD(rule, flagset); 851 PF_MD5_UPD(rule, allow_opts); 852 PF_MD5_UPD(rule, rt); 853 PF_MD5_UPD(rule, tos); 854 } 855 856 int 857 pf_commit_rules(u_int32_t version, char *anchor) 858 { 859 struct pf_ruleset *rs; 860 struct pf_rule *rule; 861 struct pf_rulequeue *old_rules; 862 u_int32_t old_rcount; 863 864 PF_ASSERT_LOCKED(); 865 866 rs = pf_find_ruleset(anchor); 867 if (rs == NULL || !rs->rules.inactive.open || 868 version != rs->rules.inactive.version) 869 return (EBUSY); 870 871 if (rs == &pf_main_ruleset) 872 pf_calc_chksum(rs); 873 874 /* Swap rules, keep the old. */ 875 old_rules = rs->rules.active.ptr; 876 old_rcount = rs->rules.active.rcount; 877 878 rs->rules.active.ptr = rs->rules.inactive.ptr; 879 rs->rules.active.rcount = rs->rules.inactive.rcount; 880 rs->rules.inactive.ptr = old_rules; 881 rs->rules.inactive.rcount = old_rcount; 882 883 rs->rules.active.version = rs->rules.inactive.version; 884 pf_calc_skip_steps(rs->rules.active.ptr); 885 886 887 /* Purge the old rule list. */ 888 while ((rule = TAILQ_FIRST(old_rules)) != NULL) 889 pf_rm_rule(old_rules, rule); 890 rs->rules.inactive.rcount = 0; 891 rs->rules.inactive.open = 0; 892 pf_remove_if_empty_ruleset(rs); 893 894 /* queue defs only in the main ruleset */ 895 if (anchor[0]) 896 return (0); 897 return (pf_commit_queues()); 898 } 899 900 void 901 pf_calc_chksum(struct pf_ruleset *rs) 902 { 903 MD5_CTX ctx; 904 struct pf_rule *rule; 905 u_int8_t digest[PF_MD5_DIGEST_LENGTH]; 906 907 MD5Init(&ctx); 908 909 if (rs->rules.inactive.rcount) { 910 TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) { 911 pf_hash_rule(&ctx, rule); 912 } 913 } 914 915 MD5Final(digest, &ctx); 916 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum)); 917 } 918 919 int 920 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr, 921 sa_family_t af) 922 { 923 if (pfi_dynaddr_setup(addr, af, PR_WAITOK) || 924 pf_tbladdr_setup(ruleset, addr, PR_WAITOK) || 925 pf_rtlabel_add(addr)) 926 return (EINVAL); 927 928 return (0); 929 } 930 931 struct pfi_kif * 932 pf_kif_setup(struct pfi_kif *kif_buf) 933 { 934 struct pfi_kif *kif; 935 936 if (kif_buf == NULL) 937 return (NULL); 938 939 KASSERT(kif_buf->pfik_name[0] != '\0'); 940 941 kif = pfi_kif_get(kif_buf->pfik_name, &kif_buf); 942 if (kif_buf != NULL) 943 pfi_kif_free(kif_buf); 944 pfi_kif_ref(kif, PFI_KIF_REF_RULE); 945 946 return (kif); 947 } 948 949 void 950 pf_addr_copyout(struct pf_addr_wrap *addr) 951 { 952 pfi_dynaddr_copyout(addr); 953 pf_tbladdr_copyout(addr); 954 pf_rtlabel_copyout(addr); 955 } 956 957 int 958 pf_states_clr(struct pfioc_state_kill *psk) 959 { 960 struct pf_state *st, *nextst; 961 struct pf_state *head, *tail; 962 u_int killed = 0; 963 int error; 964 965 NET_LOCK(); 966 967 /* lock against the gc removing an item from the list */ 968 error = rw_enter(&pf_state_list.pfs_rwl, RW_READ|RW_INTR); 969 if (error != 0) 970 goto unlock; 971 972 /* get a snapshot view of the ends of the list to traverse between */ 973 mtx_enter(&pf_state_list.pfs_mtx); 974 head = TAILQ_FIRST(&pf_state_list.pfs_list); 975 tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue); 976 mtx_leave(&pf_state_list.pfs_mtx); 977 978 st = NULL; 979 nextst = head; 980 981 PF_LOCK(); 982 PF_STATE_ENTER_WRITE(); 983 984 while (st != tail) { 985 st = nextst; 986 nextst = TAILQ_NEXT(st, entry_list); 987 988 if (st->timeout == PFTM_UNLINKED) 989 continue; 990 991 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 992 st->kif->pfik_name)) { 993 #if NPFSYNC > 0 994 /* don't send out individual delete messages */ 995 SET(st->state_flags, PFSTATE_NOSYNC); 996 #endif /* NPFSYNC > 0 */ 997 pf_remove_state(st); 998 killed++; 999 } 1000 } 1001 1002 PF_STATE_EXIT_WRITE(); 1003 PF_UNLOCK(); 1004 rw_exit(&pf_state_list.pfs_rwl); 1005 1006 psk->psk_killed = killed; 1007 1008 #if NPFSYNC > 0 1009 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1010 #endif /* NPFSYNC > 0 */ 1011 unlock: 1012 NET_UNLOCK(); 1013 1014 return (error); 1015 } 1016 1017 int 1018 pf_states_get(struct pfioc_states *ps) 1019 { 1020 struct pf_state *st, *nextst; 1021 struct pf_state *head, *tail; 1022 struct pfsync_state *p, pstore; 1023 u_int32_t nr = 0; 1024 int error; 1025 1026 if (ps->ps_len == 0) { 1027 nr = pf_status.states; 1028 ps->ps_len = sizeof(struct pfsync_state) * nr; 1029 return (0); 1030 } 1031 1032 p = ps->ps_states; 1033 1034 /* lock against the gc removing an item from the list */ 1035 error = rw_enter(&pf_state_list.pfs_rwl, RW_READ|RW_INTR); 1036 if (error != 0) 1037 return (error); 1038 1039 /* get a snapshot view of the ends of the list to traverse between */ 1040 mtx_enter(&pf_state_list.pfs_mtx); 1041 head = TAILQ_FIRST(&pf_state_list.pfs_list); 1042 tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue); 1043 mtx_leave(&pf_state_list.pfs_mtx); 1044 1045 st = NULL; 1046 nextst = head; 1047 1048 while (st != tail) { 1049 st = nextst; 1050 nextst = TAILQ_NEXT(st, entry_list); 1051 1052 if (st->timeout == PFTM_UNLINKED) 1053 continue; 1054 1055 if ((nr+1) * sizeof(*p) > ps->ps_len) 1056 break; 1057 1058 pf_state_export(&pstore, st); 1059 error = copyout(&pstore, p, sizeof(*p)); 1060 if (error) 1061 goto fail; 1062 1063 p++; 1064 nr++; 1065 } 1066 ps->ps_len = sizeof(struct pfsync_state) * nr; 1067 1068 fail: 1069 rw_exit(&pf_state_list.pfs_rwl); 1070 1071 return (error); 1072 } 1073 1074 int 1075 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) 1076 { 1077 int error = 0; 1078 1079 /* XXX keep in sync with switch() below */ 1080 if (securelevel > 1) 1081 switch (cmd) { 1082 case DIOCGETRULES: 1083 case DIOCGETRULE: 1084 case DIOCGETSTATE: 1085 case DIOCSETSTATUSIF: 1086 case DIOCGETSTATUS: 1087 case DIOCCLRSTATUS: 1088 case DIOCNATLOOK: 1089 case DIOCSETDEBUG: 1090 case DIOCGETSTATES: 1091 case DIOCGETTIMEOUT: 1092 case DIOCGETLIMIT: 1093 case DIOCGETRULESETS: 1094 case DIOCGETRULESET: 1095 case DIOCGETQUEUES: 1096 case DIOCGETQUEUE: 1097 case DIOCGETQSTATS: 1098 case DIOCRGETTABLES: 1099 case DIOCRGETTSTATS: 1100 case DIOCRCLRTSTATS: 1101 case DIOCRCLRADDRS: 1102 case DIOCRADDADDRS: 1103 case DIOCRDELADDRS: 1104 case DIOCRSETADDRS: 1105 case DIOCRGETADDRS: 1106 case DIOCRGETASTATS: 1107 case DIOCRCLRASTATS: 1108 case DIOCRTSTADDRS: 1109 case DIOCOSFPGET: 1110 case DIOCGETSRCNODES: 1111 case DIOCCLRSRCNODES: 1112 case DIOCIGETIFACES: 1113 case DIOCSETIFFLAG: 1114 case DIOCCLRIFFLAG: 1115 case DIOCGETSYNFLWATS: 1116 break; 1117 case DIOCRCLRTABLES: 1118 case DIOCRADDTABLES: 1119 case DIOCRDELTABLES: 1120 case DIOCRSETTFLAGS: 1121 if (((struct pfioc_table *)addr)->pfrio_flags & 1122 PFR_FLAG_DUMMY) 1123 break; /* dummy operation ok */ 1124 return (EPERM); 1125 default: 1126 return (EPERM); 1127 } 1128 1129 if (!(flags & FWRITE)) 1130 switch (cmd) { 1131 case DIOCGETRULES: 1132 case DIOCGETSTATE: 1133 case DIOCGETSTATUS: 1134 case DIOCGETSTATES: 1135 case DIOCGETTIMEOUT: 1136 case DIOCGETLIMIT: 1137 case DIOCGETRULESETS: 1138 case DIOCGETRULESET: 1139 case DIOCGETQUEUES: 1140 case DIOCGETQUEUE: 1141 case DIOCGETQSTATS: 1142 case DIOCNATLOOK: 1143 case DIOCRGETTABLES: 1144 case DIOCRGETTSTATS: 1145 case DIOCRGETADDRS: 1146 case DIOCRGETASTATS: 1147 case DIOCRTSTADDRS: 1148 case DIOCOSFPGET: 1149 case DIOCGETSRCNODES: 1150 case DIOCIGETIFACES: 1151 case DIOCGETSYNFLWATS: 1152 case DIOCXEND: 1153 break; 1154 case DIOCRCLRTABLES: 1155 case DIOCRADDTABLES: 1156 case DIOCRDELTABLES: 1157 case DIOCRCLRTSTATS: 1158 case DIOCRCLRADDRS: 1159 case DIOCRADDADDRS: 1160 case DIOCRDELADDRS: 1161 case DIOCRSETADDRS: 1162 case DIOCRSETTFLAGS: 1163 if (((struct pfioc_table *)addr)->pfrio_flags & 1164 PFR_FLAG_DUMMY) { 1165 flags |= FWRITE; /* need write lock for dummy */ 1166 break; /* dummy operation ok */ 1167 } 1168 return (EACCES); 1169 case DIOCGETRULE: 1170 if (((struct pfioc_rule *)addr)->action == 1171 PF_GET_CLR_CNTR) 1172 return (EACCES); 1173 break; 1174 default: 1175 return (EACCES); 1176 } 1177 1178 rw_enter_write(&pfioctl_rw); 1179 1180 switch (cmd) { 1181 1182 case DIOCSTART: 1183 NET_LOCK(); 1184 PF_LOCK(); 1185 if (pf_status.running) 1186 error = EEXIST; 1187 else { 1188 pf_status.running = 1; 1189 pf_status.since = getuptime(); 1190 if (pf_status.stateid == 0) { 1191 pf_status.stateid = gettime(); 1192 pf_status.stateid = pf_status.stateid << 32; 1193 } 1194 timeout_add_sec(&pf_purge_states_to, 1); 1195 timeout_add_sec(&pf_purge_to, 1); 1196 pf_create_queues(); 1197 DPFPRINTF(LOG_NOTICE, "pf: started"); 1198 } 1199 PF_UNLOCK(); 1200 NET_UNLOCK(); 1201 break; 1202 1203 case DIOCSTOP: 1204 NET_LOCK(); 1205 PF_LOCK(); 1206 if (!pf_status.running) 1207 error = ENOENT; 1208 else { 1209 pf_status.running = 0; 1210 pf_status.since = getuptime(); 1211 pf_remove_queues(); 1212 DPFPRINTF(LOG_NOTICE, "pf: stopped"); 1213 } 1214 PF_UNLOCK(); 1215 NET_UNLOCK(); 1216 break; 1217 1218 case DIOCGETQUEUES: { 1219 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1220 struct pf_queuespec *qs; 1221 u_int32_t nr = 0; 1222 1223 PF_LOCK(); 1224 pq->ticket = pf_main_ruleset.rules.active.version; 1225 1226 /* save state to not run over them all each time? */ 1227 qs = TAILQ_FIRST(pf_queues_active); 1228 while (qs != NULL) { 1229 qs = TAILQ_NEXT(qs, entries); 1230 nr++; 1231 } 1232 pq->nr = nr; 1233 PF_UNLOCK(); 1234 break; 1235 } 1236 1237 case DIOCGETQUEUE: { 1238 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1239 struct pf_queuespec *qs; 1240 u_int32_t nr = 0; 1241 1242 PF_LOCK(); 1243 if (pq->ticket != pf_main_ruleset.rules.active.version) { 1244 error = EBUSY; 1245 PF_UNLOCK(); 1246 goto fail; 1247 } 1248 1249 /* save state to not run over them all each time? */ 1250 qs = TAILQ_FIRST(pf_queues_active); 1251 while ((qs != NULL) && (nr++ < pq->nr)) 1252 qs = TAILQ_NEXT(qs, entries); 1253 if (qs == NULL) { 1254 error = EBUSY; 1255 PF_UNLOCK(); 1256 goto fail; 1257 } 1258 memcpy(&pq->queue, qs, sizeof(pq->queue)); 1259 PF_UNLOCK(); 1260 break; 1261 } 1262 1263 case DIOCGETQSTATS: { 1264 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 1265 struct pf_queuespec *qs; 1266 u_int32_t nr; 1267 int nbytes; 1268 1269 NET_LOCK(); 1270 PF_LOCK(); 1271 if (pq->ticket != pf_main_ruleset.rules.active.version) { 1272 error = EBUSY; 1273 PF_UNLOCK(); 1274 NET_UNLOCK(); 1275 goto fail; 1276 } 1277 nbytes = pq->nbytes; 1278 nr = 0; 1279 1280 /* save state to not run over them all each time? */ 1281 qs = TAILQ_FIRST(pf_queues_active); 1282 while ((qs != NULL) && (nr++ < pq->nr)) 1283 qs = TAILQ_NEXT(qs, entries); 1284 if (qs == NULL) { 1285 error = EBUSY; 1286 PF_UNLOCK(); 1287 NET_UNLOCK(); 1288 goto fail; 1289 } 1290 memcpy(&pq->queue, qs, sizeof(pq->queue)); 1291 /* It's a root flow queue but is not an HFSC root class */ 1292 if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 && 1293 !(qs->flags & PFQS_ROOTCLASS)) 1294 error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf, 1295 &nbytes); 1296 else 1297 error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf, 1298 &nbytes); 1299 if (error == 0) 1300 pq->nbytes = nbytes; 1301 PF_UNLOCK(); 1302 NET_UNLOCK(); 1303 break; 1304 } 1305 1306 case DIOCADDQUEUE: { 1307 struct pfioc_queue *q = (struct pfioc_queue *)addr; 1308 struct pf_queuespec *qs; 1309 1310 qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1311 if (qs == NULL) { 1312 error = ENOMEM; 1313 goto fail; 1314 } 1315 1316 NET_LOCK(); 1317 PF_LOCK(); 1318 if (q->ticket != pf_main_ruleset.rules.inactive.version) { 1319 error = EBUSY; 1320 PF_UNLOCK(); 1321 NET_UNLOCK(); 1322 pool_put(&pf_queue_pl, qs); 1323 goto fail; 1324 } 1325 memcpy(qs, &q->queue, sizeof(*qs)); 1326 qs->qid = pf_qname2qid(qs->qname, 1); 1327 if (qs->qid == 0) { 1328 error = EBUSY; 1329 PF_UNLOCK(); 1330 NET_UNLOCK(); 1331 pool_put(&pf_queue_pl, qs); 1332 goto fail; 1333 } 1334 if (qs->parent[0] && (qs->parent_qid = 1335 pf_qname2qid(qs->parent, 0)) == 0) { 1336 error = ESRCH; 1337 PF_UNLOCK(); 1338 NET_UNLOCK(); 1339 pool_put(&pf_queue_pl, qs); 1340 goto fail; 1341 } 1342 qs->kif = pfi_kif_get(qs->ifname, NULL); 1343 if (qs->kif == NULL) { 1344 error = ESRCH; 1345 PF_UNLOCK(); 1346 NET_UNLOCK(); 1347 pool_put(&pf_queue_pl, qs); 1348 goto fail; 1349 } 1350 /* XXX resolve bw percentage specs */ 1351 pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE); 1352 1353 TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries); 1354 PF_UNLOCK(); 1355 NET_UNLOCK(); 1356 1357 break; 1358 } 1359 1360 case DIOCADDRULE: { 1361 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1362 struct pf_ruleset *ruleset; 1363 struct pf_rule *rule, *tail; 1364 1365 rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1366 if (rule == NULL) { 1367 error = ENOMEM; 1368 goto fail; 1369 } 1370 1371 if ((error = pf_rule_copyin(&pr->rule, rule))) { 1372 pf_rule_free(rule); 1373 rule = NULL; 1374 goto fail; 1375 } 1376 1377 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1378 error = EINVAL; 1379 pf_rule_free(rule); 1380 rule = NULL; 1381 goto fail; 1382 } 1383 if ((error = pf_rule_checkaf(rule))) { 1384 pf_rule_free(rule); 1385 rule = NULL; 1386 goto fail; 1387 } 1388 if (rule->src.addr.type == PF_ADDR_NONE || 1389 rule->dst.addr.type == PF_ADDR_NONE) { 1390 error = EINVAL; 1391 pf_rule_free(rule); 1392 rule = NULL; 1393 goto fail; 1394 } 1395 1396 if (rule->rt && !rule->direction) { 1397 error = EINVAL; 1398 pf_rule_free(rule); 1399 rule = NULL; 1400 goto fail; 1401 } 1402 1403 NET_LOCK(); 1404 PF_LOCK(); 1405 pr->anchor[sizeof(pr->anchor) - 1] = '\0'; 1406 ruleset = pf_find_ruleset(pr->anchor); 1407 if (ruleset == NULL) { 1408 error = EINVAL; 1409 PF_UNLOCK(); 1410 NET_UNLOCK(); 1411 pf_rule_free(rule); 1412 goto fail; 1413 } 1414 if (pr->ticket != ruleset->rules.inactive.version) { 1415 error = EBUSY; 1416 PF_UNLOCK(); 1417 NET_UNLOCK(); 1418 pf_rule_free(rule); 1419 goto fail; 1420 } 1421 rule->cuid = p->p_ucred->cr_ruid; 1422 rule->cpid = p->p_p->ps_pid; 1423 1424 tail = TAILQ_LAST(ruleset->rules.inactive.ptr, 1425 pf_rulequeue); 1426 if (tail) 1427 rule->nr = tail->nr + 1; 1428 else 1429 rule->nr = 0; 1430 1431 rule->kif = pf_kif_setup(rule->kif); 1432 rule->rcv_kif = pf_kif_setup(rule->rcv_kif); 1433 rule->rdr.kif = pf_kif_setup(rule->rdr.kif); 1434 rule->nat.kif = pf_kif_setup(rule->nat.kif); 1435 rule->route.kif = pf_kif_setup(rule->route.kif); 1436 1437 if (rule->overload_tblname[0]) { 1438 if ((rule->overload_tbl = pfr_attach_table(ruleset, 1439 rule->overload_tblname, PR_WAITOK)) == NULL) 1440 error = EINVAL; 1441 else 1442 rule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; 1443 } 1444 1445 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 1446 error = EINVAL; 1447 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 1448 error = EINVAL; 1449 if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af)) 1450 error = EINVAL; 1451 if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af)) 1452 error = EINVAL; 1453 if (pf_addr_setup(ruleset, &rule->route.addr, rule->af)) 1454 error = EINVAL; 1455 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1456 error = EINVAL; 1457 1458 if (error) { 1459 pf_rm_rule(NULL, rule); 1460 PF_UNLOCK(); 1461 NET_UNLOCK(); 1462 goto fail; 1463 } 1464 TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr, 1465 rule, entries); 1466 ruleset->rules.inactive.rcount++; 1467 PF_UNLOCK(); 1468 NET_UNLOCK(); 1469 break; 1470 } 1471 1472 case DIOCGETRULES: { 1473 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1474 struct pf_ruleset *ruleset; 1475 struct pf_rule *rule; 1476 struct pf_trans *t; 1477 u_int32_t ruleset_version; 1478 1479 NET_LOCK(); 1480 PF_LOCK(); 1481 pr->anchor[sizeof(pr->anchor) - 1] = '\0'; 1482 ruleset = pf_find_ruleset(pr->anchor); 1483 if (ruleset == NULL) { 1484 error = EINVAL; 1485 PF_UNLOCK(); 1486 NET_UNLOCK(); 1487 goto fail; 1488 } 1489 rule = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue); 1490 if (rule) 1491 pr->nr = rule->nr + 1; 1492 else 1493 pr->nr = 0; 1494 ruleset_version = ruleset->rules.active.version; 1495 pf_anchor_take(ruleset->anchor); 1496 rule = TAILQ_FIRST(ruleset->rules.active.ptr); 1497 PF_UNLOCK(); 1498 NET_UNLOCK(); 1499 1500 t = pf_open_trans(minor(dev)); 1501 if (t == NULL) { 1502 error = EBUSY; 1503 goto fail; 1504 } 1505 pf_init_tgetrule(t, ruleset->anchor, ruleset_version, rule); 1506 pr->ticket = t->pft_ticket; 1507 1508 break; 1509 } 1510 1511 case DIOCGETRULE: { 1512 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1513 struct pf_ruleset *ruleset; 1514 struct pf_rule *rule; 1515 struct pf_trans *t; 1516 int i; 1517 1518 t = pf_find_trans(minor(dev), pr->ticket); 1519 if (t == NULL) { 1520 error = ENXIO; 1521 goto fail; 1522 } 1523 KASSERT(t->pft_unit == minor(dev)); 1524 if (t->pft_type != PF_TRANS_GETRULE) { 1525 error = EINVAL; 1526 goto fail; 1527 } 1528 1529 NET_LOCK(); 1530 PF_LOCK(); 1531 KASSERT(t->pftgr_anchor != NULL); 1532 ruleset = &t->pftgr_anchor->ruleset; 1533 if (t->pftgr_version != ruleset->rules.active.version) { 1534 error = EBUSY; 1535 PF_UNLOCK(); 1536 NET_UNLOCK(); 1537 goto fail; 1538 } 1539 rule = t->pftgr_rule; 1540 if (rule == NULL) { 1541 error = ENOENT; 1542 PF_UNLOCK(); 1543 NET_UNLOCK(); 1544 goto fail; 1545 } 1546 memcpy(&pr->rule, rule, sizeof(struct pf_rule)); 1547 memset(&pr->rule.entries, 0, sizeof(pr->rule.entries)); 1548 pr->rule.kif = NULL; 1549 pr->rule.nat.kif = NULL; 1550 pr->rule.rdr.kif = NULL; 1551 pr->rule.route.kif = NULL; 1552 pr->rule.rcv_kif = NULL; 1553 pr->rule.anchor = NULL; 1554 pr->rule.overload_tbl = NULL; 1555 pr->rule.pktrate.limit /= PF_THRESHOLD_MULT; 1556 if (pf_anchor_copyout(ruleset, rule, pr)) { 1557 error = EBUSY; 1558 PF_UNLOCK(); 1559 NET_UNLOCK(); 1560 goto fail; 1561 } 1562 pf_addr_copyout(&pr->rule.src.addr); 1563 pf_addr_copyout(&pr->rule.dst.addr); 1564 pf_addr_copyout(&pr->rule.rdr.addr); 1565 pf_addr_copyout(&pr->rule.nat.addr); 1566 pf_addr_copyout(&pr->rule.route.addr); 1567 for (i = 0; i < PF_SKIP_COUNT; ++i) 1568 if (rule->skip[i].ptr == NULL) 1569 pr->rule.skip[i].nr = (u_int32_t)-1; 1570 else 1571 pr->rule.skip[i].nr = 1572 rule->skip[i].ptr->nr; 1573 1574 if (pr->action == PF_GET_CLR_CNTR) { 1575 rule->evaluations = 0; 1576 rule->packets[0] = rule->packets[1] = 0; 1577 rule->bytes[0] = rule->bytes[1] = 0; 1578 rule->states_tot = 0; 1579 } 1580 pr->nr = rule->nr; 1581 t->pftgr_rule = TAILQ_NEXT(rule, entries); 1582 PF_UNLOCK(); 1583 NET_UNLOCK(); 1584 break; 1585 } 1586 1587 case DIOCCHANGERULE: { 1588 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1589 struct pf_ruleset *ruleset; 1590 struct pf_rule *oldrule = NULL, *newrule = NULL; 1591 u_int32_t nr = 0; 1592 1593 if (pcr->action < PF_CHANGE_ADD_HEAD || 1594 pcr->action > PF_CHANGE_GET_TICKET) { 1595 error = EINVAL; 1596 goto fail; 1597 } 1598 1599 if (pcr->action == PF_CHANGE_GET_TICKET) { 1600 NET_LOCK(); 1601 PF_LOCK(); 1602 1603 ruleset = pf_find_ruleset(pcr->anchor); 1604 if (ruleset == NULL) 1605 error = EINVAL; 1606 else 1607 pcr->ticket = ++ruleset->rules.active.version; 1608 1609 PF_UNLOCK(); 1610 NET_UNLOCK(); 1611 goto fail; 1612 } 1613 1614 if (pcr->action != PF_CHANGE_REMOVE) { 1615 newrule = pool_get(&pf_rule_pl, 1616 PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1617 if (newrule == NULL) { 1618 error = ENOMEM; 1619 goto fail; 1620 } 1621 1622 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1623 error = EINVAL; 1624 pool_put(&pf_rule_pl, newrule); 1625 goto fail; 1626 } 1627 error = pf_rule_copyin(&pcr->rule, newrule); 1628 if (error != 0) { 1629 pf_rule_free(newrule); 1630 newrule = NULL; 1631 goto fail; 1632 } 1633 if ((error = pf_rule_checkaf(newrule))) { 1634 pf_rule_free(newrule); 1635 newrule = NULL; 1636 goto fail; 1637 } 1638 if (newrule->rt && !newrule->direction) { 1639 pf_rule_free(newrule); 1640 error = EINVAL; 1641 newrule = NULL; 1642 goto fail; 1643 } 1644 } 1645 1646 NET_LOCK(); 1647 PF_LOCK(); 1648 ruleset = pf_find_ruleset(pcr->anchor); 1649 if (ruleset == NULL) { 1650 error = EINVAL; 1651 PF_UNLOCK(); 1652 NET_UNLOCK(); 1653 pf_rule_free(newrule); 1654 goto fail; 1655 } 1656 1657 if (pcr->ticket != ruleset->rules.active.version) { 1658 error = EINVAL; 1659 PF_UNLOCK(); 1660 NET_UNLOCK(); 1661 pf_rule_free(newrule); 1662 goto fail; 1663 } 1664 1665 if (pcr->action != PF_CHANGE_REMOVE) { 1666 KASSERT(newrule != NULL); 1667 newrule->cuid = p->p_ucred->cr_ruid; 1668 newrule->cpid = p->p_p->ps_pid; 1669 1670 newrule->kif = pf_kif_setup(newrule->kif); 1671 newrule->rcv_kif = pf_kif_setup(newrule->rcv_kif); 1672 newrule->rdr.kif = pf_kif_setup(newrule->rdr.kif); 1673 newrule->nat.kif = pf_kif_setup(newrule->nat.kif); 1674 newrule->route.kif = pf_kif_setup(newrule->route.kif); 1675 1676 if (newrule->overload_tblname[0]) { 1677 newrule->overload_tbl = pfr_attach_table( 1678 ruleset, newrule->overload_tblname, 1679 PR_WAITOK); 1680 if (newrule->overload_tbl == NULL) 1681 error = EINVAL; 1682 else 1683 newrule->overload_tbl->pfrkt_flags |= 1684 PFR_TFLAG_ACTIVE; 1685 } 1686 1687 if (pf_addr_setup(ruleset, &newrule->src.addr, 1688 newrule->af)) 1689 error = EINVAL; 1690 if (pf_addr_setup(ruleset, &newrule->dst.addr, 1691 newrule->af)) 1692 error = EINVAL; 1693 if (pf_addr_setup(ruleset, &newrule->rdr.addr, 1694 newrule->af)) 1695 error = EINVAL; 1696 if (pf_addr_setup(ruleset, &newrule->nat.addr, 1697 newrule->af)) 1698 error = EINVAL; 1699 if (pf_addr_setup(ruleset, &newrule->route.addr, 1700 newrule->af)) 1701 error = EINVAL; 1702 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1703 error = EINVAL; 1704 1705 if (error) { 1706 pf_rm_rule(NULL, newrule); 1707 PF_UNLOCK(); 1708 NET_UNLOCK(); 1709 goto fail; 1710 } 1711 } 1712 1713 if (pcr->action == PF_CHANGE_ADD_HEAD) 1714 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1715 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1716 oldrule = TAILQ_LAST(ruleset->rules.active.ptr, 1717 pf_rulequeue); 1718 else { 1719 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1720 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1721 oldrule = TAILQ_NEXT(oldrule, entries); 1722 if (oldrule == NULL) { 1723 if (newrule != NULL) 1724 pf_rm_rule(NULL, newrule); 1725 error = EINVAL; 1726 PF_UNLOCK(); 1727 NET_UNLOCK(); 1728 goto fail; 1729 } 1730 } 1731 1732 if (pcr->action == PF_CHANGE_REMOVE) { 1733 pf_rm_rule(ruleset->rules.active.ptr, oldrule); 1734 ruleset->rules.active.rcount--; 1735 } else { 1736 if (oldrule == NULL) 1737 TAILQ_INSERT_TAIL( 1738 ruleset->rules.active.ptr, 1739 newrule, entries); 1740 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1741 pcr->action == PF_CHANGE_ADD_BEFORE) 1742 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1743 else 1744 TAILQ_INSERT_AFTER( 1745 ruleset->rules.active.ptr, 1746 oldrule, newrule, entries); 1747 ruleset->rules.active.rcount++; 1748 } 1749 1750 nr = 0; 1751 TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries) 1752 oldrule->nr = nr++; 1753 1754 ruleset->rules.active.version++; 1755 1756 pf_calc_skip_steps(ruleset->rules.active.ptr); 1757 pf_remove_if_empty_ruleset(ruleset); 1758 1759 PF_UNLOCK(); 1760 NET_UNLOCK(); 1761 break; 1762 } 1763 1764 case DIOCCLRSTATES: 1765 error = pf_states_clr((struct pfioc_state_kill *)addr); 1766 break; 1767 1768 case DIOCKILLSTATES: { 1769 struct pf_state *st, *nextst; 1770 struct pf_state_item *si, *sit; 1771 struct pf_state_key *sk, key; 1772 struct pf_addr *srcaddr, *dstaddr; 1773 u_int16_t srcport, dstport; 1774 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1775 u_int i, killed = 0; 1776 const int dirs[] = { PF_IN, PF_OUT }; 1777 int sidx, didx; 1778 1779 if (psk->psk_pfcmp.id) { 1780 if (psk->psk_pfcmp.creatorid == 0) 1781 psk->psk_pfcmp.creatorid = pf_status.hostid; 1782 NET_LOCK(); 1783 PF_LOCK(); 1784 PF_STATE_ENTER_WRITE(); 1785 if ((st = pf_find_state_byid(&psk->psk_pfcmp))) { 1786 pf_remove_state(st); 1787 psk->psk_killed = 1; 1788 } 1789 PF_STATE_EXIT_WRITE(); 1790 PF_UNLOCK(); 1791 NET_UNLOCK(); 1792 goto fail; 1793 } 1794 1795 if (psk->psk_af && psk->psk_proto && 1796 psk->psk_src.port_op == PF_OP_EQ && 1797 psk->psk_dst.port_op == PF_OP_EQ) { 1798 1799 key.af = psk->psk_af; 1800 key.proto = psk->psk_proto; 1801 key.rdomain = psk->psk_rdomain; 1802 1803 NET_LOCK(); 1804 PF_LOCK(); 1805 PF_STATE_ENTER_WRITE(); 1806 for (i = 0; i < nitems(dirs); i++) { 1807 if (dirs[i] == PF_IN) { 1808 sidx = 0; 1809 didx = 1; 1810 } else { 1811 sidx = 1; 1812 didx = 0; 1813 } 1814 pf_addrcpy(&key.addr[sidx], 1815 &psk->psk_src.addr.v.a.addr, key.af); 1816 pf_addrcpy(&key.addr[didx], 1817 &psk->psk_dst.addr.v.a.addr, key.af); 1818 key.port[sidx] = psk->psk_src.port[0]; 1819 key.port[didx] = psk->psk_dst.port[0]; 1820 1821 sk = RBT_FIND(pf_state_tree, &pf_statetbl, 1822 &key); 1823 if (sk == NULL) 1824 continue; 1825 1826 TAILQ_FOREACH_SAFE(si, &sk->sk_states, 1827 si_entry, sit) { 1828 struct pf_state *sist = si->si_st; 1829 if (((sist->key[PF_SK_WIRE]->af == 1830 sist->key[PF_SK_STACK]->af && 1831 sk == (dirs[i] == PF_IN ? 1832 sist->key[PF_SK_WIRE] : 1833 sist->key[PF_SK_STACK])) || 1834 (sist->key[PF_SK_WIRE]->af != 1835 sist->key[PF_SK_STACK]->af && 1836 dirs[i] == PF_IN && 1837 (sk == sist->key[PF_SK_STACK] || 1838 sk == sist->key[PF_SK_WIRE]))) && 1839 (!psk->psk_ifname[0] || 1840 (sist->kif != pfi_all && 1841 !strcmp(psk->psk_ifname, 1842 sist->kif->pfik_name)))) { 1843 pf_remove_state(sist); 1844 killed++; 1845 } 1846 } 1847 } 1848 if (killed) 1849 psk->psk_killed = killed; 1850 PF_STATE_EXIT_WRITE(); 1851 PF_UNLOCK(); 1852 NET_UNLOCK(); 1853 goto fail; 1854 } 1855 1856 NET_LOCK(); 1857 PF_LOCK(); 1858 PF_STATE_ENTER_WRITE(); 1859 RBT_FOREACH_SAFE(st, pf_state_tree_id, &tree_id, nextst) { 1860 if (st->direction == PF_OUT) { 1861 sk = st->key[PF_SK_STACK]; 1862 srcaddr = &sk->addr[1]; 1863 dstaddr = &sk->addr[0]; 1864 srcport = sk->port[1]; 1865 dstport = sk->port[0]; 1866 } else { 1867 sk = st->key[PF_SK_WIRE]; 1868 srcaddr = &sk->addr[0]; 1869 dstaddr = &sk->addr[1]; 1870 srcport = sk->port[0]; 1871 dstport = sk->port[1]; 1872 } 1873 if ((!psk->psk_af || sk->af == psk->psk_af) 1874 && (!psk->psk_proto || psk->psk_proto == 1875 sk->proto) && psk->psk_rdomain == sk->rdomain && 1876 pf_match_addr(psk->psk_src.neg, 1877 &psk->psk_src.addr.v.a.addr, 1878 &psk->psk_src.addr.v.a.mask, 1879 srcaddr, sk->af) && 1880 pf_match_addr(psk->psk_dst.neg, 1881 &psk->psk_dst.addr.v.a.addr, 1882 &psk->psk_dst.addr.v.a.mask, 1883 dstaddr, sk->af) && 1884 (psk->psk_src.port_op == 0 || 1885 pf_match_port(psk->psk_src.port_op, 1886 psk->psk_src.port[0], psk->psk_src.port[1], 1887 srcport)) && 1888 (psk->psk_dst.port_op == 0 || 1889 pf_match_port(psk->psk_dst.port_op, 1890 psk->psk_dst.port[0], psk->psk_dst.port[1], 1891 dstport)) && 1892 (!psk->psk_label[0] || (st->rule.ptr->label[0] && 1893 !strcmp(psk->psk_label, st->rule.ptr->label))) && 1894 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1895 st->kif->pfik_name))) { 1896 pf_remove_state(st); 1897 killed++; 1898 } 1899 } 1900 psk->psk_killed = killed; 1901 PF_STATE_EXIT_WRITE(); 1902 PF_UNLOCK(); 1903 NET_UNLOCK(); 1904 break; 1905 } 1906 1907 #if NPFSYNC > 0 1908 case DIOCADDSTATE: { 1909 struct pfioc_state *ps = (struct pfioc_state *)addr; 1910 struct pfsync_state *sp = &ps->state; 1911 1912 if (sp->timeout >= PFTM_MAX) { 1913 error = EINVAL; 1914 goto fail; 1915 } 1916 NET_LOCK(); 1917 PF_LOCK(); 1918 error = pf_state_import(sp, PFSYNC_SI_IOCTL); 1919 PF_UNLOCK(); 1920 NET_UNLOCK(); 1921 break; 1922 } 1923 #endif /* NPFSYNC > 0 */ 1924 1925 case DIOCGETSTATE: { 1926 struct pfioc_state *ps = (struct pfioc_state *)addr; 1927 struct pf_state *st; 1928 struct pf_state_cmp id_key; 1929 1930 memset(&id_key, 0, sizeof(id_key)); 1931 id_key.id = ps->state.id; 1932 id_key.creatorid = ps->state.creatorid; 1933 1934 NET_LOCK(); 1935 PF_STATE_ENTER_READ(); 1936 st = pf_find_state_byid(&id_key); 1937 st = pf_state_ref(st); 1938 PF_STATE_EXIT_READ(); 1939 NET_UNLOCK(); 1940 if (st == NULL) { 1941 error = ENOENT; 1942 goto fail; 1943 } 1944 1945 pf_state_export(&ps->state, st); 1946 pf_state_unref(st); 1947 break; 1948 } 1949 1950 case DIOCGETSTATES: 1951 error = pf_states_get((struct pfioc_states *)addr); 1952 break; 1953 1954 case DIOCGETSTATUS: { 1955 struct pf_status *s = (struct pf_status *)addr; 1956 NET_LOCK(); 1957 PF_LOCK(); 1958 memcpy(s, &pf_status, sizeof(struct pf_status)); 1959 pfi_update_status(s->ifname, s); 1960 PF_UNLOCK(); 1961 NET_UNLOCK(); 1962 break; 1963 } 1964 1965 case DIOCSETSTATUSIF: { 1966 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1967 1968 NET_LOCK(); 1969 PF_LOCK(); 1970 if (pi->pfiio_name[0] == 0) { 1971 memset(pf_status.ifname, 0, IFNAMSIZ); 1972 PF_UNLOCK(); 1973 NET_UNLOCK(); 1974 goto fail; 1975 } 1976 strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ); 1977 pf_trans_set.mask |= PF_TSET_STATUSIF; 1978 PF_UNLOCK(); 1979 NET_UNLOCK(); 1980 break; 1981 } 1982 1983 case DIOCCLRSTATUS: { 1984 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1985 1986 NET_LOCK(); 1987 PF_LOCK(); 1988 /* if ifname is specified, clear counters there only */ 1989 if (pi->pfiio_name[0]) { 1990 pfi_update_status(pi->pfiio_name, NULL); 1991 PF_UNLOCK(); 1992 NET_UNLOCK(); 1993 goto fail; 1994 } 1995 1996 memset(pf_status.counters, 0, sizeof(pf_status.counters)); 1997 memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters)); 1998 memset(pf_status.scounters, 0, sizeof(pf_status.scounters)); 1999 pf_status.since = getuptime(); 2000 2001 PF_UNLOCK(); 2002 NET_UNLOCK(); 2003 break; 2004 } 2005 2006 case DIOCNATLOOK: { 2007 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 2008 struct pf_state_key *sk; 2009 struct pf_state *st; 2010 struct pf_state_key_cmp key; 2011 int m = 0, direction = pnl->direction; 2012 int sidx, didx; 2013 2014 switch (pnl->af) { 2015 case AF_INET: 2016 break; 2017 #ifdef INET6 2018 case AF_INET6: 2019 break; 2020 #endif /* INET6 */ 2021 default: 2022 error = EAFNOSUPPORT; 2023 goto fail; 2024 } 2025 2026 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 2027 sidx = (direction == PF_IN) ? 1 : 0; 2028 didx = (direction == PF_IN) ? 0 : 1; 2029 2030 if (!pnl->proto || 2031 PF_AZERO(&pnl->saddr, pnl->af) || 2032 PF_AZERO(&pnl->daddr, pnl->af) || 2033 ((pnl->proto == IPPROTO_TCP || 2034 pnl->proto == IPPROTO_UDP) && 2035 (!pnl->dport || !pnl->sport)) || 2036 pnl->rdomain > RT_TABLEID_MAX) 2037 error = EINVAL; 2038 else { 2039 key.af = pnl->af; 2040 key.proto = pnl->proto; 2041 key.rdomain = pnl->rdomain; 2042 pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af); 2043 key.port[sidx] = pnl->sport; 2044 pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af); 2045 key.port[didx] = pnl->dport; 2046 2047 NET_LOCK(); 2048 PF_STATE_ENTER_READ(); 2049 st = pf_find_state_all(&key, direction, &m); 2050 st = pf_state_ref(st); 2051 PF_STATE_EXIT_READ(); 2052 NET_UNLOCK(); 2053 2054 if (m > 1) 2055 error = E2BIG; /* more than one state */ 2056 else if (st != NULL) { 2057 sk = st->key[sidx]; 2058 pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx], 2059 sk->af); 2060 pnl->rsport = sk->port[sidx]; 2061 pf_addrcpy(&pnl->rdaddr, &sk->addr[didx], 2062 sk->af); 2063 pnl->rdport = sk->port[didx]; 2064 pnl->rrdomain = sk->rdomain; 2065 } else 2066 error = ENOENT; 2067 pf_state_unref(st); 2068 } 2069 break; 2070 } 2071 2072 case DIOCSETTIMEOUT: { 2073 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2074 2075 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 2076 pt->seconds < 0) { 2077 error = EINVAL; 2078 goto fail; 2079 } 2080 NET_LOCK(); 2081 PF_LOCK(); 2082 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 2083 pt->seconds = 1; 2084 pf_default_rule_new.timeout[pt->timeout] = pt->seconds; 2085 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2086 PF_UNLOCK(); 2087 NET_UNLOCK(); 2088 break; 2089 } 2090 2091 case DIOCGETTIMEOUT: { 2092 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 2093 2094 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 2095 error = EINVAL; 2096 goto fail; 2097 } 2098 PF_LOCK(); 2099 pt->seconds = pf_default_rule.timeout[pt->timeout]; 2100 PF_UNLOCK(); 2101 break; 2102 } 2103 2104 case DIOCGETLIMIT: { 2105 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2106 2107 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2108 error = EINVAL; 2109 goto fail; 2110 } 2111 PF_LOCK(); 2112 pl->limit = pf_pool_limits[pl->index].limit; 2113 PF_UNLOCK(); 2114 break; 2115 } 2116 2117 case DIOCSETLIMIT: { 2118 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 2119 2120 PF_LOCK(); 2121 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 2122 error = EINVAL; 2123 PF_UNLOCK(); 2124 goto fail; 2125 } 2126 if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout > 2127 pl->limit) { 2128 error = EBUSY; 2129 PF_UNLOCK(); 2130 goto fail; 2131 } 2132 /* Fragments reference mbuf clusters. */ 2133 if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) { 2134 error = EINVAL; 2135 PF_UNLOCK(); 2136 goto fail; 2137 } 2138 2139 pf_pool_limits[pl->index].limit_new = pl->limit; 2140 pl->limit = pf_pool_limits[pl->index].limit; 2141 PF_UNLOCK(); 2142 break; 2143 } 2144 2145 case DIOCSETDEBUG: { 2146 u_int32_t *level = (u_int32_t *)addr; 2147 2148 NET_LOCK(); 2149 PF_LOCK(); 2150 pf_trans_set.debug = *level; 2151 pf_trans_set.mask |= PF_TSET_DEBUG; 2152 PF_UNLOCK(); 2153 NET_UNLOCK(); 2154 break; 2155 } 2156 2157 case DIOCGETRULESETS: { 2158 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2159 struct pf_ruleset *ruleset; 2160 struct pf_anchor *anchor; 2161 2162 PF_LOCK(); 2163 pr->path[sizeof(pr->path) - 1] = '\0'; 2164 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2165 error = EINVAL; 2166 PF_UNLOCK(); 2167 goto fail; 2168 } 2169 pr->nr = 0; 2170 if (ruleset == &pf_main_ruleset) { 2171 /* XXX kludge for pf_main_ruleset */ 2172 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2173 if (anchor->parent == NULL) 2174 pr->nr++; 2175 } else { 2176 RB_FOREACH(anchor, pf_anchor_node, 2177 &ruleset->anchor->children) 2178 pr->nr++; 2179 } 2180 PF_UNLOCK(); 2181 break; 2182 } 2183 2184 case DIOCGETRULESET: { 2185 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2186 struct pf_ruleset *ruleset; 2187 struct pf_anchor *anchor; 2188 u_int32_t nr = 0; 2189 2190 PF_LOCK(); 2191 pr->path[sizeof(pr->path) - 1] = '\0'; 2192 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2193 error = EINVAL; 2194 PF_UNLOCK(); 2195 goto fail; 2196 } 2197 pr->name[0] = '\0'; 2198 if (ruleset == &pf_main_ruleset) { 2199 /* XXX kludge for pf_main_ruleset */ 2200 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 2201 if (anchor->parent == NULL && nr++ == pr->nr) { 2202 strlcpy(pr->name, anchor->name, 2203 sizeof(pr->name)); 2204 break; 2205 } 2206 } else { 2207 RB_FOREACH(anchor, pf_anchor_node, 2208 &ruleset->anchor->children) 2209 if (nr++ == pr->nr) { 2210 strlcpy(pr->name, anchor->name, 2211 sizeof(pr->name)); 2212 break; 2213 } 2214 } 2215 PF_UNLOCK(); 2216 if (!pr->name[0]) 2217 error = EBUSY; 2218 break; 2219 } 2220 2221 case DIOCRCLRTABLES: { 2222 struct pfioc_table *io = (struct pfioc_table *)addr; 2223 2224 if (io->pfrio_esize != 0) { 2225 error = ENODEV; 2226 goto fail; 2227 } 2228 NET_LOCK(); 2229 PF_LOCK(); 2230 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2231 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2232 PF_UNLOCK(); 2233 NET_UNLOCK(); 2234 break; 2235 } 2236 2237 case DIOCRADDTABLES: { 2238 struct pfioc_table *io = (struct pfioc_table *)addr; 2239 2240 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2241 error = ENODEV; 2242 goto fail; 2243 } 2244 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2245 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2246 break; 2247 } 2248 2249 case DIOCRDELTABLES: { 2250 struct pfioc_table *io = (struct pfioc_table *)addr; 2251 2252 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2253 error = ENODEV; 2254 goto fail; 2255 } 2256 NET_LOCK(); 2257 PF_LOCK(); 2258 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2259 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2260 PF_UNLOCK(); 2261 NET_UNLOCK(); 2262 break; 2263 } 2264 2265 case DIOCRGETTABLES: { 2266 struct pfioc_table *io = (struct pfioc_table *)addr; 2267 2268 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2269 error = ENODEV; 2270 goto fail; 2271 } 2272 NET_LOCK(); 2273 PF_LOCK(); 2274 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2275 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2276 PF_UNLOCK(); 2277 NET_UNLOCK(); 2278 break; 2279 } 2280 2281 case DIOCRGETTSTATS: { 2282 struct pfioc_table *io = (struct pfioc_table *)addr; 2283 2284 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2285 error = ENODEV; 2286 goto fail; 2287 } 2288 NET_LOCK(); 2289 PF_LOCK(); 2290 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2291 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2292 PF_UNLOCK(); 2293 NET_UNLOCK(); 2294 break; 2295 } 2296 2297 case DIOCRCLRTSTATS: { 2298 struct pfioc_table *io = (struct pfioc_table *)addr; 2299 2300 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2301 error = ENODEV; 2302 goto fail; 2303 } 2304 NET_LOCK(); 2305 PF_LOCK(); 2306 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2307 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2308 PF_UNLOCK(); 2309 NET_UNLOCK(); 2310 break; 2311 } 2312 2313 case DIOCRSETTFLAGS: { 2314 struct pfioc_table *io = (struct pfioc_table *)addr; 2315 2316 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2317 error = ENODEV; 2318 goto fail; 2319 } 2320 NET_LOCK(); 2321 PF_LOCK(); 2322 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2323 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2324 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2325 PF_UNLOCK(); 2326 NET_UNLOCK(); 2327 break; 2328 } 2329 2330 case DIOCRCLRADDRS: { 2331 struct pfioc_table *io = (struct pfioc_table *)addr; 2332 2333 if (io->pfrio_esize != 0) { 2334 error = ENODEV; 2335 goto fail; 2336 } 2337 NET_LOCK(); 2338 PF_LOCK(); 2339 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2340 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2341 PF_UNLOCK(); 2342 NET_UNLOCK(); 2343 break; 2344 } 2345 2346 case DIOCRADDADDRS: { 2347 struct pfioc_table *io = (struct pfioc_table *)addr; 2348 2349 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2350 error = ENODEV; 2351 goto fail; 2352 } 2353 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2354 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2355 PFR_FLAG_USERIOCTL); 2356 break; 2357 } 2358 2359 case DIOCRDELADDRS: { 2360 struct pfioc_table *io = (struct pfioc_table *)addr; 2361 2362 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2363 error = ENODEV; 2364 goto fail; 2365 } 2366 NET_LOCK(); 2367 PF_LOCK(); 2368 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2369 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2370 PFR_FLAG_USERIOCTL); 2371 PF_UNLOCK(); 2372 NET_UNLOCK(); 2373 break; 2374 } 2375 2376 case DIOCRSETADDRS: { 2377 struct pfioc_table *io = (struct pfioc_table *)addr; 2378 2379 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2380 error = ENODEV; 2381 goto fail; 2382 } 2383 NET_LOCK(); 2384 PF_LOCK(); 2385 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2386 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2387 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2388 PFR_FLAG_USERIOCTL, 0); 2389 PF_UNLOCK(); 2390 NET_UNLOCK(); 2391 break; 2392 } 2393 2394 case DIOCRGETADDRS: { 2395 struct pfioc_table *io = (struct pfioc_table *)addr; 2396 2397 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2398 error = ENODEV; 2399 goto fail; 2400 } 2401 NET_LOCK(); 2402 PF_LOCK(); 2403 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2404 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2405 PF_UNLOCK(); 2406 NET_UNLOCK(); 2407 break; 2408 } 2409 2410 case DIOCRGETASTATS: { 2411 struct pfioc_table *io = (struct pfioc_table *)addr; 2412 2413 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2414 error = ENODEV; 2415 goto fail; 2416 } 2417 NET_LOCK(); 2418 PF_LOCK(); 2419 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2420 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2421 PF_UNLOCK(); 2422 NET_UNLOCK(); 2423 break; 2424 } 2425 2426 case DIOCRCLRASTATS: { 2427 struct pfioc_table *io = (struct pfioc_table *)addr; 2428 2429 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2430 error = ENODEV; 2431 goto fail; 2432 } 2433 NET_LOCK(); 2434 PF_LOCK(); 2435 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2436 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2437 PFR_FLAG_USERIOCTL); 2438 PF_UNLOCK(); 2439 NET_UNLOCK(); 2440 break; 2441 } 2442 2443 case DIOCRTSTADDRS: { 2444 struct pfioc_table *io = (struct pfioc_table *)addr; 2445 2446 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2447 error = ENODEV; 2448 goto fail; 2449 } 2450 NET_LOCK(); 2451 PF_LOCK(); 2452 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2453 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2454 PFR_FLAG_USERIOCTL); 2455 PF_UNLOCK(); 2456 NET_UNLOCK(); 2457 break; 2458 } 2459 2460 case DIOCRINADEFINE: { 2461 struct pfioc_table *io = (struct pfioc_table *)addr; 2462 2463 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2464 error = ENODEV; 2465 goto fail; 2466 } 2467 NET_LOCK(); 2468 PF_LOCK(); 2469 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2470 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2471 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2472 PF_UNLOCK(); 2473 NET_UNLOCK(); 2474 break; 2475 } 2476 2477 case DIOCOSFPADD: { 2478 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2479 error = pf_osfp_add(io); 2480 break; 2481 } 2482 2483 case DIOCOSFPGET: { 2484 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2485 error = pf_osfp_get(io); 2486 break; 2487 } 2488 2489 case DIOCXBEGIN: { 2490 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2491 struct pfioc_trans_e *ioe; 2492 struct pfr_table *table; 2493 int i; 2494 2495 if (io->esize != sizeof(*ioe)) { 2496 error = ENODEV; 2497 goto fail; 2498 } 2499 ioe = malloc(sizeof(*ioe), M_PF, M_WAITOK); 2500 table = malloc(sizeof(*table), M_PF, M_WAITOK); 2501 NET_LOCK(); 2502 PF_LOCK(); 2503 pf_default_rule_new = pf_default_rule; 2504 PF_UNLOCK(); 2505 NET_UNLOCK(); 2506 memset(&pf_trans_set, 0, sizeof(pf_trans_set)); 2507 for (i = 0; i < io->size; i++) { 2508 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2509 free(table, M_PF, sizeof(*table)); 2510 free(ioe, M_PF, sizeof(*ioe)); 2511 error = EFAULT; 2512 goto fail; 2513 } 2514 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2515 sizeof(ioe->anchor)) { 2516 free(table, M_PF, sizeof(*table)); 2517 free(ioe, M_PF, sizeof(*ioe)); 2518 error = ENAMETOOLONG; 2519 goto fail; 2520 } 2521 NET_LOCK(); 2522 PF_LOCK(); 2523 switch (ioe->type) { 2524 case PF_TRANS_TABLE: 2525 memset(table, 0, sizeof(*table)); 2526 strlcpy(table->pfrt_anchor, ioe->anchor, 2527 sizeof(table->pfrt_anchor)); 2528 if ((error = pfr_ina_begin(table, 2529 &ioe->ticket, NULL, 0))) { 2530 PF_UNLOCK(); 2531 NET_UNLOCK(); 2532 free(table, M_PF, sizeof(*table)); 2533 free(ioe, M_PF, sizeof(*ioe)); 2534 goto fail; 2535 } 2536 break; 2537 case PF_TRANS_RULESET: 2538 if ((error = pf_begin_rules(&ioe->ticket, 2539 ioe->anchor))) { 2540 PF_UNLOCK(); 2541 NET_UNLOCK(); 2542 free(table, M_PF, sizeof(*table)); 2543 free(ioe, M_PF, sizeof(*ioe)); 2544 goto fail; 2545 } 2546 break; 2547 default: 2548 PF_UNLOCK(); 2549 NET_UNLOCK(); 2550 free(table, M_PF, sizeof(*table)); 2551 free(ioe, M_PF, sizeof(*ioe)); 2552 error = EINVAL; 2553 goto fail; 2554 } 2555 PF_UNLOCK(); 2556 NET_UNLOCK(); 2557 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 2558 free(table, M_PF, sizeof(*table)); 2559 free(ioe, M_PF, sizeof(*ioe)); 2560 error = EFAULT; 2561 goto fail; 2562 } 2563 } 2564 free(table, M_PF, sizeof(*table)); 2565 free(ioe, M_PF, sizeof(*ioe)); 2566 break; 2567 } 2568 2569 case DIOCXROLLBACK: { 2570 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2571 struct pfioc_trans_e *ioe; 2572 struct pfr_table *table; 2573 int i; 2574 2575 if (io->esize != sizeof(*ioe)) { 2576 error = ENODEV; 2577 goto fail; 2578 } 2579 ioe = malloc(sizeof(*ioe), M_PF, M_WAITOK); 2580 table = malloc(sizeof(*table), M_PF, M_WAITOK); 2581 for (i = 0; i < io->size; i++) { 2582 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2583 free(table, M_PF, sizeof(*table)); 2584 free(ioe, M_PF, sizeof(*ioe)); 2585 error = EFAULT; 2586 goto fail; 2587 } 2588 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2589 sizeof(ioe->anchor)) { 2590 free(table, M_PF, sizeof(*table)); 2591 free(ioe, M_PF, sizeof(*ioe)); 2592 error = ENAMETOOLONG; 2593 goto fail; 2594 } 2595 NET_LOCK(); 2596 PF_LOCK(); 2597 switch (ioe->type) { 2598 case PF_TRANS_TABLE: 2599 memset(table, 0, sizeof(*table)); 2600 strlcpy(table->pfrt_anchor, ioe->anchor, 2601 sizeof(table->pfrt_anchor)); 2602 if ((error = pfr_ina_rollback(table, 2603 ioe->ticket, NULL, 0))) { 2604 PF_UNLOCK(); 2605 NET_UNLOCK(); 2606 free(table, M_PF, sizeof(*table)); 2607 free(ioe, M_PF, sizeof(*ioe)); 2608 goto fail; /* really bad */ 2609 } 2610 break; 2611 case PF_TRANS_RULESET: 2612 pf_rollback_rules(ioe->ticket, ioe->anchor); 2613 break; 2614 default: 2615 PF_UNLOCK(); 2616 NET_UNLOCK(); 2617 free(table, M_PF, sizeof(*table)); 2618 free(ioe, M_PF, sizeof(*ioe)); 2619 error = EINVAL; 2620 goto fail; /* really bad */ 2621 } 2622 PF_UNLOCK(); 2623 NET_UNLOCK(); 2624 } 2625 free(table, M_PF, sizeof(*table)); 2626 free(ioe, M_PF, sizeof(*ioe)); 2627 break; 2628 } 2629 2630 case DIOCXCOMMIT: { 2631 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2632 struct pfioc_trans_e *ioe; 2633 struct pfr_table *table; 2634 struct pf_ruleset *rs; 2635 int i; 2636 2637 if (io->esize != sizeof(*ioe)) { 2638 error = ENODEV; 2639 goto fail; 2640 } 2641 ioe = malloc(sizeof(*ioe), M_PF, M_WAITOK); 2642 table = malloc(sizeof(*table), M_PF, M_WAITOK); 2643 /* first makes sure everything will succeed */ 2644 for (i = 0; i < io->size; i++) { 2645 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2646 free(table, M_PF, sizeof(*table)); 2647 free(ioe, M_PF, sizeof(*ioe)); 2648 error = EFAULT; 2649 goto fail; 2650 } 2651 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2652 sizeof(ioe->anchor)) { 2653 free(table, M_PF, sizeof(*table)); 2654 free(ioe, M_PF, sizeof(*ioe)); 2655 error = ENAMETOOLONG; 2656 goto fail; 2657 } 2658 NET_LOCK(); 2659 PF_LOCK(); 2660 switch (ioe->type) { 2661 case PF_TRANS_TABLE: 2662 rs = pf_find_ruleset(ioe->anchor); 2663 if (rs == NULL || !rs->topen || ioe->ticket != 2664 rs->tticket) { 2665 PF_UNLOCK(); 2666 NET_UNLOCK(); 2667 free(table, M_PF, sizeof(*table)); 2668 free(ioe, M_PF, sizeof(*ioe)); 2669 error = EBUSY; 2670 goto fail; 2671 } 2672 break; 2673 case PF_TRANS_RULESET: 2674 rs = pf_find_ruleset(ioe->anchor); 2675 if (rs == NULL || 2676 !rs->rules.inactive.open || 2677 rs->rules.inactive.version != 2678 ioe->ticket) { 2679 PF_UNLOCK(); 2680 NET_UNLOCK(); 2681 free(table, M_PF, sizeof(*table)); 2682 free(ioe, M_PF, sizeof(*ioe)); 2683 error = EBUSY; 2684 goto fail; 2685 } 2686 break; 2687 default: 2688 PF_UNLOCK(); 2689 NET_UNLOCK(); 2690 free(table, M_PF, sizeof(*table)); 2691 free(ioe, M_PF, sizeof(*ioe)); 2692 error = EINVAL; 2693 goto fail; 2694 } 2695 PF_UNLOCK(); 2696 NET_UNLOCK(); 2697 } 2698 NET_LOCK(); 2699 PF_LOCK(); 2700 2701 /* 2702 * Checked already in DIOCSETLIMIT, but check again as the 2703 * situation might have changed. 2704 */ 2705 for (i = 0; i < PF_LIMIT_MAX; i++) { 2706 if (((struct pool *)pf_pool_limits[i].pp)->pr_nout > 2707 pf_pool_limits[i].limit_new) { 2708 PF_UNLOCK(); 2709 NET_UNLOCK(); 2710 free(table, M_PF, sizeof(*table)); 2711 free(ioe, M_PF, sizeof(*ioe)); 2712 error = EBUSY; 2713 goto fail; 2714 } 2715 } 2716 /* now do the commit - no errors should happen here */ 2717 for (i = 0; i < io->size; i++) { 2718 PF_UNLOCK(); 2719 NET_UNLOCK(); 2720 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2721 free(table, M_PF, sizeof(*table)); 2722 free(ioe, M_PF, sizeof(*ioe)); 2723 error = EFAULT; 2724 goto fail; 2725 } 2726 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) == 2727 sizeof(ioe->anchor)) { 2728 free(table, M_PF, sizeof(*table)); 2729 free(ioe, M_PF, sizeof(*ioe)); 2730 error = ENAMETOOLONG; 2731 goto fail; 2732 } 2733 NET_LOCK(); 2734 PF_LOCK(); 2735 switch (ioe->type) { 2736 case PF_TRANS_TABLE: 2737 memset(table, 0, sizeof(*table)); 2738 strlcpy(table->pfrt_anchor, ioe->anchor, 2739 sizeof(table->pfrt_anchor)); 2740 if ((error = pfr_ina_commit(table, ioe->ticket, 2741 NULL, NULL, 0))) { 2742 PF_UNLOCK(); 2743 NET_UNLOCK(); 2744 free(table, M_PF, sizeof(*table)); 2745 free(ioe, M_PF, sizeof(*ioe)); 2746 goto fail; /* really bad */ 2747 } 2748 break; 2749 case PF_TRANS_RULESET: 2750 if ((error = pf_commit_rules(ioe->ticket, 2751 ioe->anchor))) { 2752 PF_UNLOCK(); 2753 NET_UNLOCK(); 2754 free(table, M_PF, sizeof(*table)); 2755 free(ioe, M_PF, sizeof(*ioe)); 2756 goto fail; /* really bad */ 2757 } 2758 break; 2759 default: 2760 PF_UNLOCK(); 2761 NET_UNLOCK(); 2762 free(table, M_PF, sizeof(*table)); 2763 free(ioe, M_PF, sizeof(*ioe)); 2764 error = EINVAL; 2765 goto fail; /* really bad */ 2766 } 2767 } 2768 for (i = 0; i < PF_LIMIT_MAX; i++) { 2769 if (pf_pool_limits[i].limit_new != 2770 pf_pool_limits[i].limit && 2771 pool_sethardlimit(pf_pool_limits[i].pp, 2772 pf_pool_limits[i].limit_new, NULL, 0) != 0) { 2773 PF_UNLOCK(); 2774 NET_UNLOCK(); 2775 free(table, M_PF, sizeof(*table)); 2776 free(ioe, M_PF, sizeof(*ioe)); 2777 error = EBUSY; 2778 goto fail; /* really bad */ 2779 } 2780 pf_pool_limits[i].limit = pf_pool_limits[i].limit_new; 2781 } 2782 for (i = 0; i < PFTM_MAX; i++) { 2783 int old = pf_default_rule.timeout[i]; 2784 2785 pf_default_rule.timeout[i] = 2786 pf_default_rule_new.timeout[i]; 2787 if (pf_default_rule.timeout[i] == PFTM_INTERVAL && 2788 pf_default_rule.timeout[i] < old && 2789 timeout_del(&pf_purge_to)) 2790 task_add(systqmp, &pf_purge_task); 2791 } 2792 pfi_xcommit(); 2793 pf_trans_set_commit(); 2794 PF_UNLOCK(); 2795 NET_UNLOCK(); 2796 free(table, M_PF, sizeof(*table)); 2797 free(ioe, M_PF, sizeof(*ioe)); 2798 break; 2799 } 2800 2801 case DIOCXEND: { 2802 u_int32_t *ticket = (u_int32_t *)addr; 2803 struct pf_trans *t; 2804 2805 t = pf_find_trans(minor(dev), *ticket); 2806 if (t != NULL) 2807 pf_rollback_trans(t); 2808 else 2809 error = ENXIO; 2810 break; 2811 } 2812 2813 case DIOCGETSRCNODES: { 2814 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 2815 struct pf_src_node *n, *p, *pstore; 2816 u_int32_t nr = 0; 2817 size_t space = psn->psn_len; 2818 2819 pstore = malloc(sizeof(*pstore), M_PF, M_WAITOK); 2820 2821 NET_LOCK(); 2822 PF_LOCK(); 2823 if (space == 0) { 2824 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2825 nr++; 2826 psn->psn_len = sizeof(struct pf_src_node) * nr; 2827 PF_UNLOCK(); 2828 NET_UNLOCK(); 2829 free(pstore, M_PF, sizeof(*pstore)); 2830 goto fail; 2831 } 2832 2833 p = psn->psn_src_nodes; 2834 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2835 int secs = getuptime(), diff; 2836 2837 if ((nr + 1) * sizeof(*p) > psn->psn_len) 2838 break; 2839 2840 memcpy(pstore, n, sizeof(*pstore)); 2841 memset(&pstore->entry, 0, sizeof(pstore->entry)); 2842 pstore->rule.ptr = NULL; 2843 pstore->kif = NULL; 2844 pstore->rule.nr = n->rule.ptr->nr; 2845 pstore->creation = secs - pstore->creation; 2846 if (pstore->expire > secs) 2847 pstore->expire -= secs; 2848 else 2849 pstore->expire = 0; 2850 2851 /* adjust the connection rate estimate */ 2852 diff = secs - n->conn_rate.last; 2853 if (diff >= n->conn_rate.seconds) 2854 pstore->conn_rate.count = 0; 2855 else 2856 pstore->conn_rate.count -= 2857 n->conn_rate.count * diff / 2858 n->conn_rate.seconds; 2859 2860 error = copyout(pstore, p, sizeof(*p)); 2861 if (error) { 2862 PF_UNLOCK(); 2863 NET_UNLOCK(); 2864 free(pstore, M_PF, sizeof(*pstore)); 2865 goto fail; 2866 } 2867 p++; 2868 nr++; 2869 } 2870 psn->psn_len = sizeof(struct pf_src_node) * nr; 2871 2872 PF_UNLOCK(); 2873 NET_UNLOCK(); 2874 free(pstore, M_PF, sizeof(*pstore)); 2875 break; 2876 } 2877 2878 case DIOCCLRSRCNODES: { 2879 struct pf_src_node *n; 2880 struct pf_state *st; 2881 2882 NET_LOCK(); 2883 PF_LOCK(); 2884 PF_STATE_ENTER_WRITE(); 2885 RBT_FOREACH(st, pf_state_tree_id, &tree_id) 2886 pf_src_tree_remove_state(st); 2887 PF_STATE_EXIT_WRITE(); 2888 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2889 n->expire = 1; 2890 pf_purge_expired_src_nodes(); 2891 PF_UNLOCK(); 2892 NET_UNLOCK(); 2893 break; 2894 } 2895 2896 case DIOCKILLSRCNODES: { 2897 struct pf_src_node *sn; 2898 struct pf_state *st; 2899 struct pfioc_src_node_kill *psnk = 2900 (struct pfioc_src_node_kill *)addr; 2901 u_int killed = 0; 2902 2903 NET_LOCK(); 2904 PF_LOCK(); 2905 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 2906 if (pf_match_addr(psnk->psnk_src.neg, 2907 &psnk->psnk_src.addr.v.a.addr, 2908 &psnk->psnk_src.addr.v.a.mask, 2909 &sn->addr, sn->af) && 2910 pf_match_addr(psnk->psnk_dst.neg, 2911 &psnk->psnk_dst.addr.v.a.addr, 2912 &psnk->psnk_dst.addr.v.a.mask, 2913 &sn->raddr, sn->af)) { 2914 /* Handle state to src_node linkage */ 2915 if (sn->states != 0) { 2916 PF_ASSERT_LOCKED(); 2917 PF_STATE_ENTER_WRITE(); 2918 RBT_FOREACH(st, pf_state_tree_id, 2919 &tree_id) 2920 pf_state_rm_src_node(st, sn); 2921 PF_STATE_EXIT_WRITE(); 2922 } 2923 sn->expire = 1; 2924 killed++; 2925 } 2926 } 2927 2928 if (killed > 0) 2929 pf_purge_expired_src_nodes(); 2930 2931 psnk->psnk_killed = killed; 2932 PF_UNLOCK(); 2933 NET_UNLOCK(); 2934 break; 2935 } 2936 2937 case DIOCSETHOSTID: { 2938 u_int32_t *hostid = (u_int32_t *)addr; 2939 2940 NET_LOCK(); 2941 PF_LOCK(); 2942 if (*hostid == 0) 2943 pf_trans_set.hostid = arc4random(); 2944 else 2945 pf_trans_set.hostid = *hostid; 2946 pf_trans_set.mask |= PF_TSET_HOSTID; 2947 PF_UNLOCK(); 2948 NET_UNLOCK(); 2949 break; 2950 } 2951 2952 case DIOCOSFPFLUSH: 2953 pf_osfp_flush(); 2954 break; 2955 2956 case DIOCIGETIFACES: { 2957 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2958 struct pfi_kif *kif_buf; 2959 int apfiio_size = io->pfiio_size; 2960 2961 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 2962 error = ENODEV; 2963 goto fail; 2964 } 2965 2966 if ((kif_buf = mallocarray(sizeof(*kif_buf), apfiio_size, 2967 M_PF, M_WAITOK|M_CANFAIL)) == NULL) { 2968 error = EINVAL; 2969 goto fail; 2970 } 2971 2972 NET_LOCK_SHARED(); 2973 PF_LOCK(); 2974 pfi_get_ifaces(io->pfiio_name, kif_buf, &io->pfiio_size); 2975 PF_UNLOCK(); 2976 NET_UNLOCK_SHARED(); 2977 if (copyout(kif_buf, io->pfiio_buffer, sizeof(*kif_buf) * 2978 io->pfiio_size)) 2979 error = EFAULT; 2980 free(kif_buf, M_PF, sizeof(*kif_buf) * apfiio_size); 2981 break; 2982 } 2983 2984 case DIOCSETIFFLAG: { 2985 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2986 2987 if (io == NULL) { 2988 error = EINVAL; 2989 goto fail; 2990 } 2991 2992 PF_LOCK(); 2993 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 2994 PF_UNLOCK(); 2995 break; 2996 } 2997 2998 case DIOCCLRIFFLAG: { 2999 struct pfioc_iface *io = (struct pfioc_iface *)addr; 3000 3001 if (io == NULL) { 3002 error = EINVAL; 3003 goto fail; 3004 } 3005 3006 PF_LOCK(); 3007 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 3008 PF_UNLOCK(); 3009 break; 3010 } 3011 3012 case DIOCSETREASS: { 3013 u_int32_t *reass = (u_int32_t *)addr; 3014 3015 NET_LOCK(); 3016 PF_LOCK(); 3017 pf_trans_set.reass = *reass; 3018 pf_trans_set.mask |= PF_TSET_REASS; 3019 PF_UNLOCK(); 3020 NET_UNLOCK(); 3021 break; 3022 } 3023 3024 case DIOCSETSYNFLWATS: { 3025 struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr; 3026 3027 NET_LOCK(); 3028 PF_LOCK(); 3029 error = pf_syncookies_setwats(io->hiwat, io->lowat); 3030 PF_UNLOCK(); 3031 NET_UNLOCK(); 3032 break; 3033 } 3034 3035 case DIOCGETSYNFLWATS: { 3036 struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr; 3037 3038 NET_LOCK(); 3039 PF_LOCK(); 3040 error = pf_syncookies_getwats(io); 3041 PF_UNLOCK(); 3042 NET_UNLOCK(); 3043 break; 3044 } 3045 3046 case DIOCSETSYNCOOKIES: { 3047 u_int8_t *mode = (u_int8_t *)addr; 3048 3049 NET_LOCK(); 3050 PF_LOCK(); 3051 error = pf_syncookies_setmode(*mode); 3052 PF_UNLOCK(); 3053 NET_UNLOCK(); 3054 break; 3055 } 3056 3057 default: 3058 error = ENODEV; 3059 break; 3060 } 3061 fail: 3062 rw_exit_write(&pfioctl_rw); 3063 3064 return (error); 3065 } 3066 3067 void 3068 pf_trans_set_commit(void) 3069 { 3070 if (pf_trans_set.mask & PF_TSET_STATUSIF) 3071 strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ); 3072 if (pf_trans_set.mask & PF_TSET_DEBUG) 3073 pf_status.debug = pf_trans_set.debug; 3074 if (pf_trans_set.mask & PF_TSET_HOSTID) 3075 pf_status.hostid = pf_trans_set.hostid; 3076 if (pf_trans_set.mask & PF_TSET_REASS) 3077 pf_status.reass = pf_trans_set.reass; 3078 } 3079 3080 void 3081 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to) 3082 { 3083 memmove(to, from, sizeof(*to)); 3084 to->kif = NULL; 3085 to->addr.p.tbl = NULL; 3086 } 3087 3088 int 3089 pf_validate_range(u_int8_t op, u_int16_t port[2], int order) 3090 { 3091 u_int16_t a = (order == PF_ORDER_NET) ? ntohs(port[0]) : port[0]; 3092 u_int16_t b = (order == PF_ORDER_NET) ? ntohs(port[1]) : port[1]; 3093 3094 if ((op == PF_OP_RRG && a > b) || /* 34:12, i.e. none */ 3095 (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */ 3096 (op == PF_OP_XRG && a > b)) /* 34<>22, i.e. all */ 3097 return 1; 3098 return 0; 3099 } 3100 3101 int 3102 pf_rule_copyin(struct pf_rule *from, struct pf_rule *to) 3103 { 3104 int i; 3105 3106 if (from->scrub_flags & PFSTATE_SETPRIO && 3107 (from->set_prio[0] > IFQ_MAXPRIO || 3108 from->set_prio[1] > IFQ_MAXPRIO)) 3109 return (EINVAL); 3110 3111 to->src = from->src; 3112 to->src.addr.p.tbl = NULL; 3113 to->dst = from->dst; 3114 to->dst.addr.p.tbl = NULL; 3115 3116 if (pf_validate_range(to->src.port_op, to->src.port, PF_ORDER_NET)) 3117 return (EINVAL); 3118 if (pf_validate_range(to->dst.port_op, to->dst.port, PF_ORDER_NET)) 3119 return (EINVAL); 3120 3121 /* XXX union skip[] */ 3122 3123 strlcpy(to->label, from->label, sizeof(to->label)); 3124 strlcpy(to->ifname, from->ifname, sizeof(to->ifname)); 3125 strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname)); 3126 strlcpy(to->qname, from->qname, sizeof(to->qname)); 3127 strlcpy(to->pqname, from->pqname, sizeof(to->pqname)); 3128 strlcpy(to->tagname, from->tagname, sizeof(to->tagname)); 3129 strlcpy(to->match_tagname, from->match_tagname, 3130 sizeof(to->match_tagname)); 3131 strlcpy(to->overload_tblname, from->overload_tblname, 3132 sizeof(to->overload_tblname)); 3133 3134 pf_pool_copyin(&from->nat, &to->nat); 3135 pf_pool_copyin(&from->rdr, &to->rdr); 3136 pf_pool_copyin(&from->route, &to->route); 3137 3138 if (pf_validate_range(to->rdr.port_op, to->rdr.proxy_port, 3139 PF_ORDER_HOST)) 3140 return (EINVAL); 3141 3142 to->kif = (to->ifname[0]) ? 3143 pfi_kif_alloc(to->ifname, M_WAITOK) : NULL; 3144 to->rcv_kif = (to->rcv_ifname[0]) ? 3145 pfi_kif_alloc(to->rcv_ifname, M_WAITOK) : NULL; 3146 to->rdr.kif = (to->rdr.ifname[0]) ? 3147 pfi_kif_alloc(to->rdr.ifname, M_WAITOK) : NULL; 3148 to->nat.kif = (to->nat.ifname[0]) ? 3149 pfi_kif_alloc(to->nat.ifname, M_WAITOK) : NULL; 3150 to->route.kif = (to->route.ifname[0]) ? 3151 pfi_kif_alloc(to->route.ifname, M_WAITOK) : NULL; 3152 3153 to->os_fingerprint = from->os_fingerprint; 3154 3155 to->rtableid = from->rtableid; 3156 if (to->rtableid >= 0 && !rtable_exists(to->rtableid)) 3157 return (EBUSY); 3158 to->onrdomain = from->onrdomain; 3159 if (to->onrdomain != -1 && (to->onrdomain < 0 || 3160 to->onrdomain > RT_TABLEID_MAX)) 3161 return (EINVAL); 3162 3163 for (i = 0; i < PFTM_MAX; i++) 3164 to->timeout[i] = from->timeout[i]; 3165 to->states_tot = from->states_tot; 3166 to->max_states = from->max_states; 3167 to->max_src_nodes = from->max_src_nodes; 3168 to->max_src_states = from->max_src_states; 3169 to->max_src_conn = from->max_src_conn; 3170 to->max_src_conn_rate.limit = from->max_src_conn_rate.limit; 3171 to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds; 3172 pf_init_threshold(&to->pktrate, from->pktrate.limit, 3173 from->pktrate.seconds); 3174 3175 if (to->qname[0] != 0) { 3176 if ((to->qid = pf_qname2qid(to->qname, 0)) == 0) 3177 return (EBUSY); 3178 if (to->pqname[0] != 0) { 3179 if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0) 3180 return (EBUSY); 3181 } else 3182 to->pqid = to->qid; 3183 } 3184 to->rt_listid = from->rt_listid; 3185 to->prob = from->prob; 3186 to->return_icmp = from->return_icmp; 3187 to->return_icmp6 = from->return_icmp6; 3188 to->max_mss = from->max_mss; 3189 if (to->tagname[0]) 3190 if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0) 3191 return (EBUSY); 3192 if (to->match_tagname[0]) 3193 if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0) 3194 return (EBUSY); 3195 to->scrub_flags = from->scrub_flags; 3196 to->delay = from->delay; 3197 to->uid = from->uid; 3198 to->gid = from->gid; 3199 to->rule_flag = from->rule_flag; 3200 to->action = from->action; 3201 to->direction = from->direction; 3202 to->log = from->log; 3203 to->logif = from->logif; 3204 #if NPFLOG > 0 3205 if (!to->log) 3206 to->logif = 0; 3207 #endif /* NPFLOG > 0 */ 3208 to->quick = from->quick; 3209 to->ifnot = from->ifnot; 3210 to->rcvifnot = from->rcvifnot; 3211 to->match_tag_not = from->match_tag_not; 3212 to->keep_state = from->keep_state; 3213 to->af = from->af; 3214 to->naf = from->naf; 3215 to->proto = from->proto; 3216 to->type = from->type; 3217 to->code = from->code; 3218 to->flags = from->flags; 3219 to->flagset = from->flagset; 3220 to->min_ttl = from->min_ttl; 3221 to->allow_opts = from->allow_opts; 3222 to->rt = from->rt; 3223 to->return_ttl = from->return_ttl; 3224 to->tos = from->tos; 3225 to->set_tos = from->set_tos; 3226 to->anchor_relative = from->anchor_relative; /* XXX */ 3227 to->anchor_wildcard = from->anchor_wildcard; /* XXX */ 3228 to->flush = from->flush; 3229 to->divert.addr = from->divert.addr; 3230 to->divert.port = from->divert.port; 3231 to->divert.type = from->divert.type; 3232 to->prio = from->prio; 3233 to->set_prio[0] = from->set_prio[0]; 3234 to->set_prio[1] = from->set_prio[1]; 3235 3236 return (0); 3237 } 3238 3239 int 3240 pf_rule_checkaf(struct pf_rule *r) 3241 { 3242 switch (r->af) { 3243 case 0: 3244 if (r->rule_flag & PFRULE_AFTO) 3245 return (EPFNOSUPPORT); 3246 break; 3247 case AF_INET: 3248 if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6) 3249 return (EPFNOSUPPORT); 3250 break; 3251 #ifdef INET6 3252 case AF_INET6: 3253 if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET) 3254 return (EPFNOSUPPORT); 3255 break; 3256 #endif /* INET6 */ 3257 default: 3258 return (EPFNOSUPPORT); 3259 } 3260 3261 if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0) 3262 return (EPFNOSUPPORT); 3263 3264 return (0); 3265 } 3266 3267 int 3268 pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen) 3269 { 3270 struct pf_status pfs; 3271 3272 NET_LOCK_SHARED(); 3273 PF_LOCK(); 3274 memcpy(&pfs, &pf_status, sizeof(struct pf_status)); 3275 pfi_update_status(pfs.ifname, &pfs); 3276 PF_UNLOCK(); 3277 NET_UNLOCK_SHARED(); 3278 3279 return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs)); 3280 } 3281 3282 struct pf_trans * 3283 pf_open_trans(uint32_t unit) 3284 { 3285 static uint64_t ticket = 1; 3286 struct pf_trans *t; 3287 3288 rw_assert_wrlock(&pfioctl_rw); 3289 3290 KASSERT(pf_unit2idx(unit) < nitems(pf_tcount)); 3291 if (pf_tcount[pf_unit2idx(unit)] >= (PF_ANCHOR_STACK_MAX * 8)) 3292 return (NULL); 3293 3294 t = malloc(sizeof(*t), M_PF, M_WAITOK|M_ZERO); 3295 t->pft_unit = unit; 3296 t->pft_ticket = ticket++; 3297 pf_tcount[pf_unit2idx(unit)]++; 3298 3299 LIST_INSERT_HEAD(&pf_ioctl_trans, t, pft_entry); 3300 3301 return (t); 3302 } 3303 3304 struct pf_trans * 3305 pf_find_trans(uint32_t unit, uint64_t ticket) 3306 { 3307 struct pf_trans *t; 3308 3309 rw_assert_anylock(&pfioctl_rw); 3310 3311 LIST_FOREACH(t, &pf_ioctl_trans, pft_entry) { 3312 if (t->pft_ticket == ticket && t->pft_unit == unit) 3313 break; 3314 } 3315 3316 return (t); 3317 } 3318 3319 void 3320 pf_init_tgetrule(struct pf_trans *t, struct pf_anchor *a, 3321 uint32_t rs_version, struct pf_rule *r) 3322 { 3323 t->pft_type = PF_TRANS_GETRULE; 3324 if (a == NULL) 3325 t->pftgr_anchor = &pf_main_anchor; 3326 else 3327 t->pftgr_anchor = a; 3328 3329 t->pftgr_version = rs_version; 3330 t->pftgr_rule = r; 3331 } 3332 3333 void 3334 pf_cleanup_tgetrule(struct pf_trans *t) 3335 { 3336 KASSERT(t->pft_type == PF_TRANS_GETRULE); 3337 pf_anchor_rele(t->pftgr_anchor); 3338 } 3339 3340 void 3341 pf_free_trans(struct pf_trans *t) 3342 { 3343 switch (t->pft_type) { 3344 case PF_TRANS_GETRULE: 3345 pf_cleanup_tgetrule(t); 3346 break; 3347 default: 3348 log(LOG_ERR, "%s unknown transaction type: %d\n", 3349 __func__, t->pft_type); 3350 } 3351 3352 KASSERT(pf_unit2idx(t->pft_unit) < nitems(pf_tcount)); 3353 KASSERT(pf_tcount[pf_unit2idx(t->pft_unit)] >= 1); 3354 pf_tcount[pf_unit2idx(t->pft_unit)]--; 3355 3356 free(t, M_PF, sizeof(*t)); 3357 } 3358 3359 void 3360 pf_rollback_trans(struct pf_trans *t) 3361 { 3362 if (t != NULL) { 3363 rw_assert_wrlock(&pfioctl_rw); 3364 LIST_REMOVE(t, pft_entry); 3365 pf_free_trans(t); 3366 } 3367 } 3368