/*	$NetBSD: pf_ioctl.c,v 1.10 2004/09/06 10:01:39 yamt Exp $	*/
/*	$OpenBSD: pf_ioctl.c,v 1.112 2004/03/22 04:54:18 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_altq.h"
#include "opt_pfil_hooks.h"
#endif

#ifdef __OpenBSD__
#include "pfsync.h"
#else
#define NPFSYNC	0
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#ifdef __OpenBSD__
#include <sys/timeout.h>
#else
#include <sys/callout.h>
#endif
#include <sys/pool.h>
#include <sys/malloc.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#endif

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifdef __OpenBSD__
#include <dev/rndvar.h>
#endif
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

void		 pfattach(int);
#ifdef _LKM
void		 pfdetach(void);
#endif
int		 pfopen(dev_t, int, int, struct proc *);
int		 pfclose(dev_t, int, int, struct proc *);
struct pf_pool	*pf_get_pool(char *, char *, u_int32_t,
		    u_int8_t, u_int8_t, u_int8_t, u_int8_t, u_int8_t);
int		 pf_get_ruleset_number(u_int8_t);
void		 pf_init_ruleset(struct pf_ruleset *);
void		 pf_mv_pool(struct pf_palist *, struct pf_palist *);
void		 pf_empty_pool(struct pf_palist *);
int		 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
#ifdef ALTQ
int		 pf_begin_altq(u_int32_t *);
int		 pf_rollback_altq(u_int32_t);
int		 pf_commit_altq(u_int32_t);
#endif /* ALTQ */
int		 pf_begin_rules(u_int32_t *, int, char *, char *);
int		 pf_rollback_rules(u_int32_t, int, char *, char *);
int		 pf_commit_rules(u_int32_t, int, char *, char *);

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
	pfopen, pfclose, noread, nowrite, pfioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached = 0;
#endif

#ifdef __OpenBSD__
extern struct timeout	pf_expire_to;
#else
extern struct callout	pf_expire_to;
#endif

struct pf_rule		pf_default_rule;

#define	TAGID_MAX	50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	tagname2tag(struct pf_tags *, char *);
static void		tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		tag_unref(struct pf_tags *, u_int16_t);

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
extern struct pfil_head if_pfil;
#endif

void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
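	/* remaining object pools: altq disciplines and pool address entries */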
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);

	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	RB_INIT(&tree_src_tracking);
	TAILQ_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = 120;		/* First TCP packet */
	timeout[PFTM_TCP_OPENING] = 30;			/* No response yet */
	timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;	/* Established */
	timeout[PFTM_TCP_CLOSING] = 15 * 60;		/* Half closed */
	timeout[PFTM_TCP_FIN_WAIT] = 45;		/* Got both FINs */
	timeout[PFTM_TCP_CLOSED] = 90;			/* Got a RST */
	timeout[PFTM_UDP_FIRST_PACKET] = 60;		/* First UDP packet */
	timeout[PFTM_UDP_SINGLE] = 30;			/* Unidirectional */
	timeout[PFTM_UDP_MULTIPLE] = 60;		/* Bidirectional */
	timeout[PFTM_ICMP_FIRST_PACKET] = 20;		/* First ICMP packet */
	timeout[PFTM_ICMP_ERROR_REPLY] = 10;		/* Got error response */
	timeout[PFTM_OTHER_FIRST_PACKET] = 60;		/* First packet */
	timeout[PFTM_OTHER_SINGLE] = 30;		/* Unidirectional */
	timeout[PFTM_OTHER_MULTIPLE] = 60;		/* Bidirectional */
	timeout[PFTM_FRAG] = 30;			/* Fragment expire */
	timeout[PFTM_INTERVAL] = 10;			/* Expire interval */
	timeout[PFTM_SRC_NODE] = 0;			/* Source tracking */

#ifdef __OpenBSD__
	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
#else
	callout_init(&pf_expire_to);
	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);
#endif

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}

#ifdef _LKM
#define TAILQ_DRAIN(list, element)					\
	do {								\
		while ((element = TAILQ_FIRST(list)) != NULL) {		\
			TAILQ_REMOVE(list, element, entries);		\
			free(element, M_TEMP);				\
		}							\
	} while (0)

void
pfdetach(void)
{
	struct pf_pooladdr	*pooladdr_e;
	struct pf_altq		*altq_e;
	struct pf_anchor	*anchor_e;

	(void)pf_pfil_detach();

	callout_stop(&pf_expire_to);
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfi_destroy();

	TAILQ_DRAIN(&pf_pabuf, pooladdr_e);
	TAILQ_DRAIN(&pf_altqs[1], altq_e);
	TAILQ_DRAIN(&pf_altqs[0], altq_e);
	TAILQ_DRAIN(&pf_anchors, anchor_e);

	pf_remove_if_empty_ruleset(&pf_main_ruleset);
	pfr_destroy();
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);
}
#endif

int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

struct pf_pool *
pf_get_pool(char *anchorname, char *rulesetname, u_int32_t ticket,
    u_int8_t rule_action, u_int8_t rule_number, u_int8_t r_last,
    u_int8_t active, u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchorname, rulesetname);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

int
pf_get_ruleset_number(u_int8_t action)
{
	switch (action) {
	case PF_SCRUB:
		return (PF_RULESET_SCRUB);
		break;
	case PF_PASS:
	case PF_DROP:
		return (PF_RULESET_FILTER);
		break;
	case PF_NAT:
	case PF_NONAT:
		return (PF_RULESET_NAT);
		break;
	case PF_BINAT:
	case PF_NOBINAT:
		return (PF_RULESET_BINAT);
		break;
	case PF_RDR:
	case PF_NORDR:
		return (PF_RULESET_RDR);
		break;
	default:
		return (PF_RULESET_MAX);
		break;
	}
}

void
pf_init_ruleset(struct pf_ruleset *ruleset)
{
	int	i;

	memset(ruleset, 0, sizeof(struct pf_ruleset));
	for (i = 0; i < PF_RULESET_MAX; i++) {
		TAILQ_INIT(&ruleset->rules[i].queues[0]);
		TAILQ_INIT(&ruleset->rules[i].queues[1]);
		ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
		ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
	}
}

struct pf_anchor *
pf_find_anchor(const char *anchorname)
{
	struct pf_anchor	*anchor;
	int			 n = -1;

	anchor = TAILQ_FIRST(&pf_anchors);
	while (anchor != NULL && (n = strcmp(anchor->name, anchorname)) < 0)
		anchor = TAILQ_NEXT(anchor, entries);
	if (n == 0)
		return (anchor);
	else
		return (NULL);
}

struct pf_ruleset *
pf_find_ruleset(char *anchorname, char *rulesetname)
{
	struct pf_anchor	*anchor;
	struct pf_ruleset	*ruleset;

	if (!anchorname[0] && !rulesetname[0])
		return (&pf_main_ruleset);
	if (!anchorname[0] || !rulesetname[0])
		return (NULL);
	anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
	rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
	anchor = pf_find_anchor(anchorname);
	if (anchor == NULL)
		return (NULL);
	ruleset = TAILQ_FIRST(&anchor->rulesets);
	while (ruleset != NULL && strcmp(ruleset->name, rulesetname) < 0)
		ruleset = TAILQ_NEXT(ruleset, entries);
	if (ruleset != NULL && !strcmp(ruleset->name, rulesetname))
		return (ruleset);
	else
		return (NULL);
}

struct pf_ruleset *
pf_find_or_create_ruleset(char anchorname[PF_ANCHOR_NAME_SIZE],
    char rulesetname[PF_RULESET_NAME_SIZE])
{
	struct pf_anchor	*anchor, *a;
	struct pf_ruleset	*ruleset, *r;

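	/*
	 * Anchors and their rulesets are kept in name-sorted lists; walk
	 * each list to the insertion point and reuse an existing entry
	 * when the name already matches.
	 */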
	if (!anchorname[0] && !rulesetname[0])
		return (&pf_main_ruleset);
	if (!anchorname[0] || !rulesetname[0])
		return (NULL);
	anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
	rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
	a = TAILQ_FIRST(&pf_anchors);
	while (a != NULL && strcmp(a->name, anchorname) < 0)
		a = TAILQ_NEXT(a, entries);
	if (a != NULL && !strcmp(a->name, anchorname))
		anchor = a;
	else {
		anchor = (struct pf_anchor *)malloc(sizeof(struct pf_anchor),
		    M_TEMP, M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(struct pf_anchor));
		bcopy(anchorname, anchor->name, sizeof(anchor->name));
		TAILQ_INIT(&anchor->rulesets);
		if (a != NULL)
			TAILQ_INSERT_BEFORE(a, anchor, entries);
		else
			TAILQ_INSERT_TAIL(&pf_anchors, anchor, entries);
	}
	r = TAILQ_FIRST(&anchor->rulesets);
	while (r != NULL && strcmp(r->name, rulesetname) < 0)
		r = TAILQ_NEXT(r, entries);
	if (r != NULL && !strcmp(r->name, rulesetname))
		return (r);
	ruleset = (struct pf_ruleset *)malloc(sizeof(struct pf_ruleset),
	    M_TEMP, M_NOWAIT);
	if (ruleset != NULL) {
		pf_init_ruleset(ruleset);
		bcopy(rulesetname, ruleset->name, sizeof(ruleset->name));
		ruleset->anchor = anchor;
		if (r != NULL)
			TAILQ_INSERT_BEFORE(r, ruleset, entries);
		else
			TAILQ_INSERT_TAIL(&anchor->rulesets, ruleset, entries);
	}
	return (ruleset);
}

void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*anchor;
	int			 i;

	if (ruleset == NULL || ruleset->anchor == NULL || ruleset->tables > 0 ||
	    ruleset->topen)
		return;
	for (i = 0; i < PF_RULESET_MAX; ++i)
		if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
		    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
		    ruleset->rules[i].inactive.open)
			return;

	anchor = ruleset->anchor;
	TAILQ_REMOVE(&anchor->rulesets, ruleset, entries);
	free(ruleset, M_TEMP);

	if (TAILQ_EMPTY(&anchor->rulesets)) {
		TAILQ_REMOVE(&pf_anchors, anchor, entries);
		free(anchor, M_TEMP);
		pf_update_anchor_rules();
	}
}

void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_detach_rule(empty_pool_pa->kif);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
	}
	pfi_detach_rule(rule->kif);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	return (tag2tagname(&pf_tags, tagid, p));
}

void
pf_tag_unref(u_int16_t tag)
{
	return (tag_unref(&pf_tags, tag));
}

#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
}

void
pf_qid_unref(u_int32_t qid)
{
	return (tag_unref(&pf_qids, (u_int16_t)qid));
}

int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
#endif /* ALTQ */

int
pf_begin_rules(u_int32_t *ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor, ruleset);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor, ruleset);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	int			 s;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor, ruleset);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	pf_update_anchor_rules();
	splx(s);
	return (0);
}

int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel > 1)
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETANCHORS:
		case DIOCGETANCHOR:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCICLRISTATS:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETANCHORS:
		case DIOCGETANCHOR:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EACCES);
		default:
			return (EACCES);
		}

	switch (cmd) {

	case DIOCSTART:
		if (pf_status.running)
			error = EEXIST;
		else {
#ifdef __NetBSD__
			error = pf_pfil_attach();
			if (error)
				break;
#endif
			pf_status.running = 1;
			pf_status.since = time.tv_sec;
			if (pf_status.stateid == 0) {
				pf_status.stateid = time.tv_sec;
				pf_status.stateid = pf_status.stateid << 32;
			}
			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running)
			error = ENOENT;
		else {
#ifdef __NetBSD__
			error = pf_pfil_detach();
			if (error)
				break;
#endif
			pf_status.running = 0;
			pf_status.since = time.tv_sec;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;

	case DIOCBEGINRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;

		error = pf_begin_rules(&pr->ticket, pf_get_ruleset_number(
		    pr->rule.action), pr->anchor, pr->ruleset);
		break;
	}

	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		int			 rs_num;

		ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.anchorname[0] && ruleset != &pf_main_ruleset) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_attach_rule(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
		}

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && !rule->anchorname[0]) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets = rule->bytes = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		break;
	}

	case DIOCCOMMITRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;

		error = pf_commit_rules(pr->ticket, pf_get_ruleset_number(
		    pr->rule.action), pr->anchor, pr->ruleset);
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		s = splsoftnet();
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		splx(s);
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		ruleset = pf_find_ruleset(pr->anchor, pr->ruleset);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		s = splsoftnet();
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;
		splx(s);
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor, pcr->ruleset);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_attach_rule(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
			} else
				newrule->kif = NULL;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;

			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
				error = EINVAL;
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
				error = EINVAL;

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchorname[0])) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = newrule->packets = 0;
			newrule->bytes = 0;
		}
		pf_empty_pool(&pf_pabuf);

		s = splsoftnet();

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				pf_rm_rule(NULL, newrule);
				error = EINVAL;
				splx(s);
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE)
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
		else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);
		pf_update_anchor_rules();

		ruleset->rules[rs_num].active.ticket++;
		splx(s);
		break;
	}

	case DIOCCLRSTATES: {
		struct pf_state		*state;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		s = splsoftnet();
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    state->u.s.kif->pfik_name)) {
				state->timeout = PFTM_PURGE;
#if NPFSYNC
				/* don't send out individual delete messages */
				state->sync_flags = PFSTATE_NOSYNC;
#endif
				killed++;
			}
		}
		pf_purge_expired_states();
		pf_status.states = 0;
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		splx(s);
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*state;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		int			 killed = 0;

		s = splsoftnet();
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			if ((!psk->psk_af || state->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    state->proto) &&
			    PF_MATCHA(psk->psk_src.not,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &state->lan.addr, state->af) &&
			    PF_MATCHA(psk->psk_dst.not,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &state->ext.addr, state->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    state->lan.port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    state->ext.port)) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    state->u.s.kif->pfik_name))) {
				state->timeout = PFTM_PURGE;
				killed++;
			}
		}
		pf_purge_expired_states();
		splx(s);
		psk->psk_af = killed;
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*state;
		struct pfi_kif		*kif;

		if (ps->state.timeout >= PFTM_MAX &&
		    ps->state.timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		state = pool_get(&pf_state_pl, PR_NOWAIT);
		if (state == NULL) {
			error = ENOMEM;
			break;
		}
		s = splsoftnet();
		kif = pfi_lookup_create(ps->state.u.ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, state);
			error = ENOENT;
			splx(s);
			break;
		}
		bcopy(&ps->state, state, sizeof(struct pf_state));
		bzero(&state->u, sizeof(state->u));
		state->rule.ptr = &pf_default_rule;
		state->nat_rule.ptr = NULL;
		state->anchor.ptr = NULL;
		state->rt_kif = NULL;
		state->creation = time.tv_sec;
		state->pfsync_time = 0;
		state->packets[0] = state->packets[1] = 0;
		state->bytes[0] = state->bytes[1] = 0;

		if (pf_insert_state(kif, state)) {
			pfi_maybe_destroy(kif);
			pool_put(&pf_state_pl, state);
			error = ENOMEM;
		}
		splx(s);
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*state;
		u_int32_t		 nr;

		nr = 0;
		s = splsoftnet();
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			if (nr >= ps->nr)
				break;
			nr++;
		}
		if (state == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		bcopy(state, &ps->state, sizeof(struct pf_state));
		ps->state.rule.nr = state->rule.ptr->nr;
		ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
		    -1 : state->nat_rule.ptr->nr;
		ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
		    -1 : state->anchor.ptr->nr;
		splx(s);
		ps->state.expire = pf_state_expires(state);
		if (ps->state.expire > time.tv_sec)
			ps->state.expire -= time.tv_sec;
		else
			ps->state.expire = 0;
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pf_state		*p, pstore;
		struct pfi_kif		*kif;
		u_int32_t		 nr = 0;
		int			 space = ps->ps_len;

		if (space == 0) {
			s = splsoftnet();
			TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
				nr += kif->pfik_states;
			splx(s);
			ps->ps_len = sizeof(struct pf_state) * nr;
			return (0);
		}

		s = splsoftnet();
		p = ps->ps_states;
		TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
			RB_FOREACH(state, pf_state_tree_ext_gwy,
			    &kif->pfik_ext_gwy) {
				int	secs = time.tv_sec;

				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;

				bcopy(state, &pstore, sizeof(pstore));
				strlcpy(pstore.u.ifname, kif->pfik_name,
				    sizeof(pstore.u.ifname));
				pstore.rule.nr = state->rule.ptr->nr;
				pstore.nat_rule.nr = (state->nat_rule.ptr ==
				    NULL) ? -1 : state->nat_rule.ptr->nr;
				pstore.anchor.nr = (state->anchor.ptr ==
				    NULL) ? -1 : state->anchor.ptr->nr;
				pstore.creation = secs - pstore.creation;
				pstore.expire = pf_state_expires(state);
				if (pstore.expire > secs)
					pstore.expire -= secs;
				else
					pstore.expire = 0;
				error = copyout(&pstore, p, sizeof(*p));
				if (error) {
					splx(s);
					goto fail;
				}
				p++;
				nr++;
			}
		ps->ps_len = sizeof(struct pf_state) * nr;
		splx(s);
		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		bcopy(&pf_status, s, sizeof(struct pf_status));
		pfi_fill_oldstatus(s);
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		if (ifunit(pi->ifname) == NULL) {
			error = EINVAL;
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		if (*pf_status.ifname)
			pfi_clr_istats(pf_status.ifname, NULL,
			    PFI_FLAG_INSTANCE);
		break;
	}

	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state		*state;
		struct pf_state		 key;
		int			 m = 0, direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    !pnl->dport || !pnl->sport)
			error = EINVAL;
		else {
			s = splsoftnet();

			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
				key.ext.port = pnl->dport;
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				key.gwy.port = pnl->sport;
				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				key.lan.port = pnl->dport;
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
				key.ext.port = pnl->sport;
				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &state->lan.addr,
					    state->af);
					pnl->rsport = state->lan.port;
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					pnl->rdport = pnl->dport;
				} else {
					PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
					    state->af);
					pnl->rdport = state->gwy.port;
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					pnl->rsport = pnl->sport;
				}
			} else
				error = ENOENT;
			splx(s);
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		old = pf_default_rule.timeout[pt->timeout];
		pf_default_rule.timeout[pt->timeout] = pt->seconds;
		pt->seconds = old;
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
#ifdef __OpenBSD__
		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0) != 0) {
			error = EBUSY;
			goto fail;
		}
#else
		pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0);
#endif
		old_limit = pf_pool_limits[pl->index].limit;
		pf_pool_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		pf_status.debug = *level;
		break;
	}

	case DIOCCLRRULECTRS: {
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		s = splsoftnet();
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
			rule->evaluations = rule->packets =
			    rule->bytes = 0;
		splx(s);
		break;
	}

#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq		*altq;
		struct ifnet		*ifp;
		struct tb_profile	 tb;

		/* enable all altq interfaces on active list */
		s = splsoftnet();
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				if ((ifp = ifunit(altq->ifname)) == NULL) {
					error = EINVAL;
					break;
				}
				if (ifp->if_snd.altq_type != ALTQT_NONE)
					error = altq_enable(&ifp->if_snd);
				if (error != 0)
					break;
				/* set tokenbucket regulator */
				tb.rate = altq->ifbandwidth;
				tb.depth = altq->tbrsize;
				error = tbr_set(&ifp->if_snd, &tb);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pfaltq_running = 1;
		splx(s);
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq		*altq;
		struct ifnet		*ifp;
		struct tb_profile	 tb;
		int			 err;

		/* disable all altq interfaces on active list */
		s = splsoftnet();
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				if ((ifp = ifunit(altq->ifname)) == NULL) {
					error = EINVAL;
					break;
				}
				if (ifp->if_snd.altq_type != ALTQT_NONE) {
					err = altq_disable(&ifp->if_snd);
					if (err != 0 && error == 0)
						error = err;
				}
				/* clear tokenbucket regulator */
				tb.rate = 0;
				err = tbr_set(&ifp->if_snd, &tb);
				if (err != 0 && error == 0)
					error = err;
			}
		}
		if (error == 0)
			pfaltq_running = 0;
		splx(s);
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCBEGINALTQS: {
		u_int32_t	*ticket = (u_int32_t *)addr;

		error = pf_begin_altq(ticket);
		break;
	}

	case DIOCADDALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq, *a;

		if (pa->ticket != ticket_altqs_inactive) {
			error = EBUSY;
			break;
		}
		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
		if (altq == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, altq);
				break;
			}
			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		error = altq_add(altq);
		if (error) {
			pool_put(&pf_altq_pl, altq);
			break;
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCCOMMITALTQS: {
		u_int32_t	ticket = *(u_int32_t *)addr;

		error = pf_commit_altq(ticket);
		break;
	}

	case DIOCGETALTQS: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;

		pa->nr = 0;
		s = splsoftnet();
		TAILQ_FOREACH(altq, pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = ticket_altqs_active;
		splx(s);
		break;
	}

	case DIOCGETALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;

		if (pa->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nr = 0;
		s = splsoftnet();
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		splx(s);
		break;
	}

	case DIOCCHANGEALTQ:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;
		int			 nbytes;

		if (pq->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		s = splsoftnet();
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		error = altq_getqstats(altq, pq->buf, &nbytes);
		splx(s);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
		if (pa->ifname[0]) {
			pa->kif = pfi_attach_rule(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
		}
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			pfi_dynaddr_remove(&pa->addr);
			pfi_detach_rule(pa->kif);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		pp->nr = 0;
		s = splsoftnet();
		pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket,
		    pp->r_action, pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		splx(s);
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		u_int32_t		 nr = 0;

		s = splsoftnet();
		pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket,
		    pp->r_action, pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		splx(s);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_ruleset	*ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		ruleset = pf_find_ruleset(pca->anchor, pca->ruleset);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ruleset, pca->ticket,
		    pca->r_action, pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
#ifndef INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_attach_rule(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
			} else
				newpa->kif = NULL;
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_detach_rule(newpa->kif);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		s = splsoftnet();

		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int	i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				splx(s);
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_detach_rule(oldpa->kif);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		splx(s);
		break;
	}

	case DIOCGETANCHORS: {
		struct pfioc_anchor	*pa = (struct pfioc_anchor *)addr;
		struct pf_anchor	*anchor;

		pa->nr = 0;
		TAILQ_FOREACH(anchor, &pf_anchors, entries)
			pa->nr++;
		break;
	}

	case DIOCGETANCHOR: {
		struct pfioc_anchor	*pa = (struct pfioc_anchor *)addr;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		anchor = TAILQ_FIRST(&pf_anchors);
		while (anchor != NULL && nr < pa->nr) {
			anchor = TAILQ_NEXT(anchor, entries);
			nr++;
		}
		if (anchor == NULL)
			error = EBUSY;
		else
			bcopy(anchor->name, pa->name, sizeof(pa->name));
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_anchor	*anchor;
		struct pf_ruleset	*ruleset;

		pr->anchor[PF_ANCHOR_NAME_SIZE-1] = 0;
		if ((anchor = pf_find_anchor(pr->anchor)) == NULL) {
	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_anchor *anchor;
		struct pf_ruleset *ruleset;

		pr->anchor[PF_ANCHOR_NAME_SIZE-1] = 0;
		if ((anchor = pf_find_anchor(pr->anchor)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		TAILQ_FOREACH(ruleset, &anchor->rulesets, entries)
			pr->nr++;
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_anchor *anchor;
		struct pf_ruleset *ruleset;
		u_int32_t nr = 0;

		if ((anchor = pf_find_anchor(pr->anchor)) == NULL) {
			error = EINVAL;
			break;
		}
		ruleset = TAILQ_FIRST(&anchor->rulesets);
		while (ruleset != NULL && nr < pr->nr) {
			ruleset = TAILQ_NEXT(ruleset, entries);
			nr++;
		}
		if (ruleset == NULL)
			error = EBUSY;
		else
			bcopy(ruleset->name, pr->name, sizeof(pr->name));
		break;
	}

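	/*
	 * The DIOCR* table ioctls are thin wrappers around the pfr_*
	 * backend.  Each one validates pfrio_esize against the element
	 * size it expects (0 for operations without an element buffer),
	 * so a mismatched pfctl fails with ENODEV instead of misparsing
	 * the buffer, and each passes PFR_FLAG_USERIOCTL to mark the
	 * request as coming from userland.
	 */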
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINABEGIN: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_begin(&io->pfrio_table, &io->pfrio_ticket,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINACOMMIT: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_commit(&io->pfrio_table, io->pfrio_ticket,
		    &io->pfrio_nadd, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		s = splsoftnet();
		error = pf_osfp_add(io);
		splx(s);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		s = splsoftnet();
		error = pf_osfp_get(io);
		splx(s);
		break;
	}

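	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement an all-or-nothing
	 * ruleset load over an array of pfioc_trans_e elements, one per
	 * ruleset type (ALTQ, tables, or rules).  Begin hands back one
	 * ticket per element; commit re-checks every ticket before touching
	 * anything, so a stale ticket aborts the whole transaction with
	 * EBUSY.  A userland caller would drive it roughly like this
	 * (sketch only, error handling omitted):
	 *
	 *	struct pfioc_trans io;
	 *	struct pfioc_trans_e ioe[1];
	 *
	 *	memset(&io, 0, sizeof(io));
	 *	memset(ioe, 0, sizeof(ioe));
	 *	ioe[0].rs_num = PF_RULESET_FILTER;
	 *	io.size = 1;
	 *	io.esize = sizeof(ioe[0]);
	 *	io.array = ioe;
	 *	ioctl(dev, DIOCXBEGIN, &io);
	 *	... load inactive rulesets using the returned tickets ...
	 *	ioctl(dev, DIOCXCOMMIT, &io);
	 */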
	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e ioe;
		struct pfr_table table;
		int i;

		if (io->esize != sizeof(ioe)) {
			error = ENODEV;
			goto fail;
		}
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
				error = EFAULT;
				goto fail;
			}
			switch (ioe.rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe.anchor[0] || ioe.ruleset[0]) {
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe.ticket)))
					goto fail;
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe.anchor,
				    sizeof(table.pfrt_anchor));
				strlcpy(table.pfrt_ruleset, ioe.ruleset,
				    sizeof(table.pfrt_ruleset));
				if ((error = pfr_ina_begin(&table,
				    &ioe.ticket, NULL, 0)))
					goto fail;
				break;
			default:
				if ((error = pf_begin_rules(&ioe.ticket,
				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
					goto fail;
				break;
			}
			if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
				error = EFAULT;
				goto fail;
			}
		}
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e ioe;
		struct pfr_table table;
		int i;

		if (io->esize != sizeof(ioe)) {
			error = ENODEV;
			goto fail;
		}
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
				error = EFAULT;
				goto fail;
			}
			switch (ioe.rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe.anchor[0] || ioe.ruleset[0]) {
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe.ticket)))
					goto fail; /* really bad */
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe.anchor,
				    sizeof(table.pfrt_anchor));
				strlcpy(table.pfrt_ruleset, ioe.ruleset,
				    sizeof(table.pfrt_ruleset));
				if ((error = pfr_ina_rollback(&table,
				    ioe.ticket, NULL, 0)))
					goto fail; /* really bad */
				break;
			default:
				if ((error = pf_rollback_rules(ioe.ticket,
				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
					goto fail; /* really bad */
				break;
			}
		}
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e ioe;
		struct pfr_table table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof(ioe)) {
			error = ENODEV;
			goto fail;
		}
		/* first makes sure everything will succeed */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
				error = EFAULT;
				goto fail;
			}
			switch (ioe.rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe.anchor[0] || ioe.ruleset[0]) {
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe.ticket !=
				    ticket_altqs_inactive) {
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe.anchor, ioe.ruleset);
				if (rs == NULL || !rs->topen || ioe.ticket !=
				    rs->tticket) {
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe.rs_num < 0 || ioe.rs_num >=
				    PF_RULESET_MAX) {
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe.anchor, ioe.ruleset);
				if (rs == NULL ||
				    !rs->rules[ioe.rs_num].inactive.open ||
				    rs->rules[ioe.rs_num].inactive.ticket !=
				    ioe.ticket) {
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
				error = EFAULT;
				goto fail;
			}
			switch (ioe.rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe.ticket)))
					goto fail; /* really bad */
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe.anchor,
				    sizeof(table.pfrt_anchor));
				strlcpy(table.pfrt_ruleset, ioe.ruleset,
				    sizeof(table.pfrt_ruleset));
				if ((error = pfr_ina_commit(&table, ioe.ticket,
				    NULL, NULL, 0)))
					goto fail; /* really bad */
				break;
			default:
				if ((error = pf_commit_rules(ioe.ticket,
				    ioe.rs_num, ioe.anchor, ioe.ruleset)))
					goto fail; /* really bad */
				break;
			}
		}
		break;
	}

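	/*
	 * DIOCGETSRCNODES is a two-step export: with psn_len == 0 it only
	 * reports the space required, otherwise it copies out as many
	 * source nodes as fit, converting creation/expire into seconds
	 * relative to now.
	 */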
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n;
		struct pf_src_node *p, pstore;
		u_int32_t nr = 0;
		int space = psn->psn_len;

		if (space == 0) {
			s = splsoftnet();
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			splx(s);
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			return (0);
		}

		s = splsoftnet();
		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int secs = time.tv_sec;

			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
				break;

			bcopy(n, &pstore, sizeof(pstore));
			if (n->rule.ptr != NULL)
				pstore.rule.nr = n->rule.ptr->nr;
			pstore.creation = secs - pstore.creation;
			if (pstore.expire > secs)
				pstore.expire -= secs;
			else
				pstore.expire = 0;
			error = copyout(&pstore, p, sizeof(*p));
			if (error) {
				splx(s);
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		splx(s);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		s = splsoftnet();
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		splx(s);
		break;
	}

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		if (*hostid == 0) {
			error = EINVAL;
			goto fail;
		}
		pf_status.hostid = *hostid;
		break;
	}

	case DIOCOSFPFLUSH:
		s = splsoftnet();
		pf_osfp_flush();
		splx(s);
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof(struct pfi_if)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size, io->pfiio_flags);
		break;
	}

	case DIOCICLRISTATS: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
		    io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:

	return (error);
}

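/*
 * NetBSD glue: pf is attached to the stack through pfil(9) hooks rather
 * than direct calls from the IP input/output paths.  The wrappers below
 * make the mbuf writable (and, outbound, finalize delayed checksums)
 * before handing the packet to pf_test()/pf_test6(); any verdict other
 * than PF_PASS drops the packet and reports EHOSTUNREACH to the caller.
 */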
#ifdef __NetBSD__
int
pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * ensure that mbufs are writable beforehand
	 * as it's assumed by pf code.
	 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}
	}

	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp) != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	} else
		return (0);
}

#ifdef INET6
int
pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int error;

	/*
	 * ensure that mbufs are writable beforehand
	 * as it's assumed by pf code.
	 * XXX inefficient
	 */
	error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}

	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp) != PF_PASS) {
		m_freem(*mp);
		*mp = NULL;
		return EHOSTUNREACH;
	} else
		return (0);
}
#endif

int
pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	u_long cmd = (u_long)mp;

	switch (cmd) {
	case PFIL_IFNET_ATTACH:
		pfi_attach_ifnet(ifp);
		break;
	case PFIL_IFNET_DETACH:
		pfi_detach_ifnet(ifp);
		break;
	}

	return (0);
}

int
pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	extern void pfi_kifaddr_update_if(struct ifnet *);

	u_long cmd = (u_long)mp;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCDIFADDR:
#ifdef INET6
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6:
#endif
		pfi_kifaddr_update_if(ifp);
		break;
	default:
		panic("unexpected ioctl");
	}

	return (0);
}

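/*
 * pf_pfil_attach() registers the hooks in dependency order (ifnet and
 * ifaddr notifications first, then the AF_INET/AF_INET6 packet hooks) and
 * finally attaches the interfaces that already exist; the bad1..bad4
 * labels unwind exactly what has been registered so far.  pf_pfil_detach()
 * reverses the process, and both are guarded by pf_pfil_attached.
 */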
static int
pf_pfil_attach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int error;
	int i;

	if (pf_pfil_attached)
		return (0);

	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
	if (error)
		goto bad1;
	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	if (error)
		goto bad2;

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = ENOENT;
	if (error)
		goto bad3;

#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = ENOENT;
	if (error)
		goto bad4;
#endif

	for (i = 0; i < if_indexlim; i++)
		if (ifindex2ifnet[i])
			pfi_attach_ifnet(ifindex2ifnet[i]);
	pf_pfil_attached = 1;

	return (0);

#ifdef INET6
bad4:
	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
#endif
bad3:
	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
bad2:
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
bad1:
	return (error);
}

static int
pf_pfil_detach(void)
{
	struct pfil_head *ph_inet;
#ifdef INET6
	struct pfil_head *ph_inet6;
#endif
	int i;

	if (pf_pfil_attached == 0)
		return (0);

	for (i = 0; i < if_indexlim; i++)
		if (pfi_index2kif[i])
			pfi_detach_ifnet(ifindex2ifnet[i]);

	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);

	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (ph_inet)
		pfil_remove_hook((void *)pfil4_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
#ifdef INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (ph_inet6)
		pfil_remove_hook((void *)pfil6_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
#endif
	pf_pfil_attached = 0;

	return (0);
}
#endif