1 /* $OpenBSD: pf.c,v 1.181 2001/12/31 16:46:39 mickey Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * - Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * - Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 */ 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/mbuf.h> 36 #include <sys/filio.h> 37 #include <sys/fcntl.h> 38 #include <sys/socket.h> 39 #include <sys/kernel.h> 40 #include <sys/malloc.h> 41 #include <sys/time.h> 42 #include <sys/pool.h> 43 44 #include <net/if.h> 45 #include <net/if_types.h> 46 #include <net/bpf.h> 47 #include <net/route.h> 48 #include <net/if_pflog.h> 49 50 #include <netinet/in.h> 51 #include <netinet/in_var.h> 52 #include <netinet/in_systm.h> 53 #include <netinet/ip.h> 54 #include <netinet/ip_var.h> 55 #include <netinet/tcp.h> 56 #include <netinet/tcp_fsm.h> 57 #include <netinet/tcp_seq.h> 58 #include <netinet/udp.h> 59 #include <netinet/ip_icmp.h> 60 #include <netinet/in_pcb.h> 61 #include <netinet/tcp_timer.h> 62 #include <netinet/tcp_var.h> 63 #include <netinet/udp_var.h> 64 65 #include <dev/rndvar.h> 66 #include <net/pfvar.h> 67 68 #include "bpfilter.h" 69 #include "pflog.h" 70 71 #ifdef INET6 72 #include <netinet/ip6.h> 73 #include <netinet/in_pcb.h> 74 #include <netinet/icmp6.h> 75 #endif /* INET6 */ 76 77 78 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x 79 80 /* 81 * Tree data structure 82 */ 83 84 struct pf_tree_node { 85 struct pf_tree_key key; 86 struct pf_state *state; 87 struct pf_tree_node *parent; 88 struct pf_tree_node *left; 89 struct pf_tree_node *right; 90 int balance; 91 }; 92 93 struct pf_port_node { 94 LIST_ENTRY(pf_port_node) next; 95 u_int16_t port; 96 }; 97 LIST_HEAD(pf_port_list, pf_port_node); 98 99 /* structure for ipsec and ipv6 option header template */ 100 struct _opt6 { 101 u_int8_t opt6_nxt; /* next header */ 102 u_int8_t opt6_hlen; /* header extension length */ 103 u_int16_t _pad; 104 u_int32_t ah_spi; /* security parameter index 105 for authentication header */ 106 }; 107 108 /* 109 * Global variables 110 */ 111 112 TAILQ_HEAD(pf_natqueue, pf_nat) pf_nats[2]; 113 TAILQ_HEAD(pf_binatqueue, pf_binat) pf_binats[2]; 114 TAILQ_HEAD(pf_rdrqueue, pf_rdr) pf_rdrs[2]; 115 struct pf_rulequeue pf_rules[2]; 116 struct pf_rulequeue 
*pf_rules_active; 117 struct pf_rulequeue *pf_rules_inactive; 118 struct pf_natqueue *pf_nats_active; 119 struct pf_natqueue *pf_nats_inactive; 120 struct pf_binatqueue *pf_binats_active; 121 struct pf_binatqueue *pf_binats_inactive; 122 struct pf_rdrqueue *pf_rdrs_active; 123 struct pf_rdrqueue *pf_rdrs_inactive; 124 struct pf_tree_node *tree_lan_ext, *tree_ext_gwy; 125 struct timeval pftv; 126 struct pf_status pf_status; 127 struct ifnet *status_ifp; 128 129 u_int32_t pf_last_purge; 130 u_int32_t ticket_rules_active; 131 u_int32_t ticket_rules_inactive; 132 u_int32_t ticket_nats_active; 133 u_int32_t ticket_nats_inactive; 134 u_int32_t ticket_binats_active; 135 u_int32_t ticket_binats_inactive; 136 u_int32_t ticket_rdrs_active; 137 u_int32_t ticket_rdrs_inactive; 138 struct pf_port_list pf_tcp_ports; 139 struct pf_port_list pf_udp_ports; 140 141 /* Timeouts */ 142 int pftm_tcp_first_packet = 120; /* First TCP packet */ 143 int pftm_tcp_opening = 30; /* No response yet */ 144 int pftm_tcp_established = 24*60*60; /* established */ 145 int pftm_tcp_closing = 15 * 60; /* Half closed */ 146 int pftm_tcp_fin_wait = 45; /* Got both FINs */ 147 int pftm_tcp_closed = 90; /* Got a RST */ 148 149 int pftm_udp_first_packet = 60; /* First UDP packet */ 150 int pftm_udp_single = 30; /* Unidirectional */ 151 int pftm_udp_multiple = 60; /* Bidirectional */ 152 153 int pftm_icmp_first_packet = 20; /* First ICMP packet */ 154 int pftm_icmp_error_reply = 10; /* Got error response */ 155 156 int pftm_other_first_packet = 60; /* First packet */ 157 int pftm_other_single = 30; /* Unidirectional */ 158 int pftm_other_multiple = 60; /* Bidirectional */ 159 160 int pftm_frag = 30; /* Fragment expire */ 161 162 int pftm_interval = 10; /* expire interval */ 163 164 int *pftm_timeouts[PFTM_MAX] = { &pftm_tcp_first_packet, 165 &pftm_tcp_opening, &pftm_tcp_established, 166 &pftm_tcp_closing, &pftm_tcp_fin_wait, 167 &pftm_tcp_closed, &pftm_udp_first_packet, 168 &pftm_udp_single, &pftm_udp_multiple, 169 &pftm_icmp_first_packet, &pftm_icmp_error_reply, 170 &pftm_other_first_packet, &pftm_other_single, 171 &pftm_other_multiple, &pftm_frag, &pftm_interval }; 172 173 174 struct pool pf_tree_pl, pf_rule_pl, pf_nat_pl, pf_sport_pl; 175 struct pool pf_rdr_pl, pf_state_pl, pf_binat_pl; 176 177 int pf_tree_key_compare(struct pf_tree_key *, 178 struct pf_tree_key *); 179 int pf_compare_addr(struct pf_addr *, struct pf_addr *, 180 u_int8_t); 181 void pf_addrcpy(struct pf_addr *, struct pf_addr *, 182 u_int8_t); 183 int pf_compare_rules(struct pf_rule *, 184 struct pf_rule *); 185 int pf_compare_nats(struct pf_nat *, struct pf_nat *); 186 int pf_compare_binats(struct pf_binat *, 187 struct pf_binat *); 188 int pf_compare_rdrs(struct pf_rdr *, struct pf_rdr *); 189 void pf_tree_rotate_left(struct pf_tree_node **); 190 void pf_tree_rotate_right(struct pf_tree_node **); 191 struct pf_tree_node *pf_tree_first(struct pf_tree_node *); 192 struct pf_tree_node *pf_tree_next(struct pf_tree_node *); 193 struct pf_tree_node *pf_tree_search(struct pf_tree_node *, 194 struct pf_tree_key *); 195 void pf_insert_state(struct pf_state *); 196 void pf_purge_expired_states(void); 197 198 void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t); 199 void pf_print_state(struct pf_state *); 200 void pf_print_flags(u_int8_t); 201 202 void pfattach(int); 203 int pfopen(dev_t, int, int, struct proc *); 204 int pfclose(dev_t, int, int, struct proc *); 205 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *); 206 207 u_int16_t pf_cksum_fixup(u_int16_t, 
u_int16_t, u_int16_t, 208 u_int8_t); 209 void pf_change_ap(struct pf_addr *, u_int16_t *, 210 u_int16_t *, u_int16_t *, struct pf_addr *, 211 u_int16_t, u_int8_t, int); 212 void pf_change_a(u_int32_t *, u_int16_t *, u_int32_t, 213 u_int8_t); 214 #ifdef INET6 215 void pf_change_a6(struct pf_addr *, u_int16_t *, 216 struct pf_addr *, u_int8_t); 217 #endif /* INET6 */ 218 void pf_change_icmp(struct pf_addr *, u_int16_t *, 219 struct pf_addr *, struct pf_addr *, u_int16_t, 220 u_int16_t *, u_int16_t *, u_int16_t *, 221 u_int16_t *, u_int8_t, int); 222 void pf_send_reset(int, struct tcphdr *, 223 struct pf_pdesc *, int); 224 void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t, int); 225 u_int16_t pf_map_port_range(struct pf_rdr *, u_int16_t); 226 struct pf_nat *pf_get_nat(struct ifnet *, u_int8_t, 227 struct pf_addr *, struct pf_addr *, int); 228 struct pf_binat *pf_get_binat(int, struct ifnet *, u_int8_t, 229 struct pf_addr *, struct pf_addr *, int); 230 struct pf_rdr *pf_get_rdr(struct ifnet *, u_int8_t, 231 struct pf_addr *, struct pf_addr *, u_int16_t, int); 232 int pf_test_tcp(struct pf_rule **, int, struct ifnet *, 233 struct mbuf *, int, int, void *, struct pf_pdesc *); 234 int pf_test_udp(struct pf_rule **, int, struct ifnet *, 235 struct mbuf *, int, int, void *, struct pf_pdesc *); 236 int pf_test_icmp(struct pf_rule **, int, struct ifnet *, 237 struct mbuf *, int, int, void *, struct pf_pdesc *); 238 int pf_test_other(struct pf_rule **, int, struct ifnet *, 239 struct mbuf *, void *, struct pf_pdesc *); 240 int pf_test_state_tcp(struct pf_state **, int, 241 struct ifnet *, struct mbuf *, int, int, 242 void *, struct pf_pdesc *); 243 int pf_test_state_udp(struct pf_state **, int, 244 struct ifnet *, struct mbuf *, int, int, 245 void *, struct pf_pdesc *); 246 int pf_test_state_icmp(struct pf_state **, int, 247 struct ifnet *, struct mbuf *, int, int, 248 void *, struct pf_pdesc *); 249 int pf_test_state_other(struct pf_state **, int, 250 struct ifnet *, struct pf_pdesc *); 251 void *pf_pull_hdr(struct mbuf *, int, void *, int, 252 u_short *, u_short *, int); 253 void pf_calc_skip_steps(struct pf_rulequeue *); 254 255 int pf_get_sport(u_int8_t, u_int16_t, u_int16_t, 256 u_int16_t *); 257 void pf_put_sport(u_int8_t, u_int16_t); 258 int pf_add_sport(struct pf_port_list *, u_int16_t); 259 int pf_chk_sport(struct pf_port_list *, u_int16_t); 260 int pf_normalize_tcp(int, struct ifnet *, struct mbuf *, 261 int, int, void *, struct pf_pdesc *); 262 void pf_route(struct mbuf *, struct pf_rule *); 263 void pf_route6(struct mbuf *, struct pf_rule *); 264 265 266 #if NPFLOG > 0 267 #define PFLOG_PACKET(i,x,a,b,c,d,e) \ 268 do { \ 269 if (b == AF_INET) { \ 270 HTONS(((struct ip *)x)->ip_len); \ 271 HTONS(((struct ip *)x)->ip_off); \ 272 pflog_packet(i,a,b,c,d,e); \ 273 NTOHS(((struct ip *)x)->ip_len); \ 274 NTOHS(((struct ip *)x)->ip_off); \ 275 } else { \ 276 pflog_packet(i,a,b,c,d,e); \ 277 } \ 278 } while (0) 279 #else 280 #define PFLOG_PACKET(i,x,a,b,c,d,e) ((void)0) 281 #endif 282 283 #define STATE_TRANSLATE(s) \ 284 (s)->lan.addr.addr32[0] != (s)->gwy.addr.addr32[0] || \ 285 ((s)->af == AF_INET6 && \ 286 ((s)->lan.addr.addr32[1] != (s)->gwy.addr.addr32[1] || \ 287 (s)->lan.addr.addr32[2] != (s)->gwy.addr.addr32[2] || \ 288 (s)->lan.addr.addr32[3] != (s)->gwy.addr.addr32[3])) || \ 289 (s)->lan.port != (s)->gwy.port 290 291 int 292 pf_tree_key_compare(struct pf_tree_key *a, struct pf_tree_key *b) 293 { 294 register int diff; 295 296 /* 297 * could use memcmp(), but with the best manual order, 
we can 298 * minimize the average number of compares. what is faster? 299 */ 300 if ((diff = a->proto - b->proto) != 0) 301 return (diff); 302 if ((diff = a->af - b->af) != 0) 303 return (diff); 304 switch (a->af) { 305 #ifdef INET 306 case AF_INET: 307 if (a->addr[0].addr32[0] > b->addr[0].addr32[0]) 308 return 1; 309 if (a->addr[0].addr32[0] < b->addr[0].addr32[0]) 310 return -1; 311 if (a->addr[1].addr32[0] > b->addr[1].addr32[0]) 312 return 1; 313 if (a->addr[1].addr32[0] < b->addr[1].addr32[0]) 314 return -1; 315 break; 316 #endif /* INET */ 317 #ifdef INET6 318 case AF_INET6: 319 if (a->addr[0].addr32[0] > b->addr[0].addr32[0]) 320 return 1; 321 if (a->addr[0].addr32[0] < b->addr[0].addr32[0]) 322 return -1; 323 if (a->addr[0].addr32[1] > b->addr[0].addr32[1]) 324 return 1; 325 if (a->addr[0].addr32[1] < b->addr[0].addr32[1]) 326 return -1; 327 if (a->addr[0].addr32[2] > b->addr[0].addr32[2]) 328 return 1; 329 if (a->addr[0].addr32[2] < b->addr[0].addr32[2]) 330 return -1; 331 if (a->addr[0].addr32[3] > b->addr[0].addr32[3]) 332 return 1; 333 if (a->addr[0].addr32[3] < b->addr[0].addr32[3]) 334 return -1; 335 if (a->addr[1].addr32[0] > b->addr[1].addr32[0]) 336 return 1; 337 if (a->addr[1].addr32[0] < b->addr[1].addr32[0]) 338 return -1; 339 if (a->addr[1].addr32[1] > b->addr[1].addr32[1]) 340 return 1; 341 if (a->addr[1].addr32[1] < b->addr[1].addr32[1]) 342 return -1; 343 if (a->addr[1].addr32[2] > b->addr[1].addr32[2]) 344 return 1; 345 if (a->addr[1].addr32[2] < b->addr[1].addr32[2]) 346 return -1; 347 if (a->addr[1].addr32[3] > b->addr[1].addr32[3]) 348 return 1; 349 if (a->addr[1].addr32[3] < b->addr[1].addr32[3]) 350 return -1; 351 break; 352 #endif /* INET6 */ 353 } 354 355 if ((diff = a->port[0] - b->port[0]) != 0) 356 return (diff); 357 if ((diff = a->port[1] - b->port[1]) != 0) 358 return (diff); 359 360 return (0); 361 } 362 363 #ifdef INET6 364 void 365 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, u_int8_t af) 366 { 367 switch(af) { 368 #ifdef INET 369 case AF_INET: 370 dst->addr32[0] = src->addr32[0]; 371 break; 372 #endif /* INET */ 373 case AF_INET6: 374 dst->addr32[0] = src->addr32[0]; 375 dst->addr32[1] = src->addr32[1]; 376 dst->addr32[2] = src->addr32[2]; 377 dst->addr32[3] = src->addr32[3]; 378 break; 379 } 380 } 381 #endif 382 383 int 384 pf_compare_rules(struct pf_rule *a, struct pf_rule *b) 385 { 386 if (a->return_icmp != b->return_icmp || 387 a->action != b->action || 388 a->direction != b->direction || 389 a->log != b->log || 390 a->quick != b->quick || 391 a->keep_state != b->keep_state || 392 a->af != b->af || 393 a->proto != b->proto || 394 a->type != b->type || 395 a->code != b->code || 396 a->flags != b->flags || 397 a->flagset != b->flagset || 398 a->rule_flag != b->rule_flag || 399 a->min_ttl != b->min_ttl || 400 a->allow_opts != b->allow_opts) 401 return (1); 402 if (memcmp(&a->src, &b->src, sizeof(struct pf_rule_addr))) 403 return (1); 404 if (memcmp(&a->dst, &b->dst, sizeof(struct pf_rule_addr))) 405 return (1); 406 if (strcmp(a->ifname, b->ifname)) 407 return (1); 408 return (0); 409 } 410 411 int 412 pf_compare_nats(struct pf_nat *a, struct pf_nat *b) 413 { 414 if (a->proto != b->proto || 415 a->af != b->af || 416 a->snot != b->snot || 417 a->dnot != b->dnot || 418 a->ifnot != b->ifnot) 419 return (1); 420 if (PF_ANEQ(&a->saddr, &b->saddr, a->af)) 421 return (1); 422 if (PF_ANEQ(&a->smask, &b->smask, a->af)) 423 return (1); 424 if (PF_ANEQ(&a->daddr, &b->daddr, a->af)) 425 return (1); 426 if (PF_ANEQ(&a->dmask, &b->dmask, a->af)) 427 
return (1); 428 if (PF_ANEQ(&a->raddr, &b->raddr, a->af)) 429 return (1); 430 if (strcmp(a->ifname, b->ifname)) 431 return (1); 432 return (0); 433 } 434 435 int 436 pf_compare_binats(struct pf_binat *a, struct pf_binat *b) 437 { 438 if (PF_ANEQ(&a->saddr, &b->saddr, a->af)) 439 return (1); 440 if (PF_ANEQ(&a->daddr, &b->daddr, a->af)) 441 return (1); 442 if (PF_ANEQ(&a->dmask, &b->dmask, a->af)) 443 return (1); 444 if (PF_ANEQ(&a->raddr, &b->raddr, a->af)) 445 return (1); 446 if (a->proto != b->proto || 447 a->dnot != b->dnot || 448 a->af != b->af) 449 return (1); 450 if (strcmp(a->ifname, b->ifname)) 451 return (1); 452 return (0); 453 } 454 455 int 456 pf_compare_rdrs(struct pf_rdr *a, struct pf_rdr *b) 457 { 458 if (a->dport != b->dport || 459 a->dport2 != b->dport2 || 460 a->rport != b->rport || 461 a->proto != b->proto || 462 a->af != b->af || 463 a->snot != b->snot || 464 a->dnot != b->dnot || 465 a->ifnot != b->ifnot || 466 a->opts != b->opts) 467 return (1); 468 if (PF_ANEQ(&a->saddr, &b->saddr, a->af)) 469 return (1); 470 if (PF_ANEQ(&a->smask, &b->smask, a->af)) 471 return (1); 472 if (PF_ANEQ(&a->daddr, &b->daddr, a->af)) 473 return (1); 474 if (PF_ANEQ(&a->dmask, &b->dmask, a->af)) 475 return (1); 476 if (PF_ANEQ(&a->raddr, &b->raddr, a->af)) 477 return (1); 478 if (strcmp(a->ifname, b->ifname)) 479 return (1); 480 return (0); 481 } 482 483 void 484 pf_tree_rotate_left(struct pf_tree_node **n) 485 { 486 struct pf_tree_node *q = *n, *p = (*n)->parent; 487 488 (*n)->parent = (*n)->right; 489 *n = (*n)->right; 490 (*n)->parent = p; 491 q->right = (*n)->left; 492 if (q->right) 493 q->right->parent = q; 494 (*n)->left = q; 495 q->balance--; 496 if ((*n)->balance > 0) 497 q->balance -= (*n)->balance; 498 (*n)->balance--; 499 if (q->balance < 0) 500 (*n)->balance += q->balance; 501 } 502 503 void 504 pf_tree_rotate_right(struct pf_tree_node **n) 505 { 506 struct pf_tree_node *q = *n, *p = (*n)->parent; 507 508 (*n)->parent = (*n)->left; 509 *n = (*n)->left; 510 (*n)->parent = p; 511 q->left = (*n)->right; 512 if (q->left) 513 q->left->parent = q; 514 (*n)->right = q; 515 q->balance++; 516 if ((*n)->balance < 0) 517 q->balance -= (*n)->balance; 518 (*n)->balance++; 519 if (q->balance > 0) 520 (*n)->balance += q->balance; 521 } 522 523 int 524 pf_tree_insert(struct pf_tree_node **n, struct pf_tree_node *p, 525 struct pf_tree_key *key, struct pf_state *state) 526 { 527 int deltaH = 0; 528 529 if (*n == NULL) { 530 *n = pool_get(&pf_tree_pl, PR_NOWAIT); 531 if (*n == NULL) 532 return (0); 533 bcopy(key, &(*n)->key, sizeof(struct pf_tree_key)); 534 (*n)->state = state; 535 (*n)->balance = 0; 536 (*n)->parent = p; 537 (*n)->left = (*n)->right = NULL; 538 deltaH = 1; 539 } else if (pf_tree_key_compare(key, &(*n)->key) > 0) { 540 if (pf_tree_insert(&(*n)->right, *n, key, state)) { 541 (*n)->balance++; 542 if ((*n)->balance == 1) 543 deltaH = 1; 544 else if ((*n)->balance == 2) { 545 if ((*n)->right->balance == -1) 546 pf_tree_rotate_right(&(*n)->right); 547 pf_tree_rotate_left(n); 548 } 549 } 550 } else { 551 if (pf_tree_insert(&(*n)->left, *n, key, state)) { 552 (*n)->balance--; 553 if ((*n)->balance == -1) 554 deltaH = 1; 555 else if ((*n)->balance == -2) { 556 if ((*n)->left->balance == 1) 557 pf_tree_rotate_left(&(*n)->left); 558 pf_tree_rotate_right(n); 559 } 560 } 561 } 562 return (deltaH); 563 } 564 565 int 566 pf_tree_remove(struct pf_tree_node **n, struct pf_tree_node *p, 567 struct pf_tree_key *key) 568 { 569 int deltaH = 0; 570 int c; 571 572 if (*n == NULL) 573 return (0); 574 c 
= pf_tree_key_compare(key, &(*n)->key); 575 if (c < 0) { 576 if (pf_tree_remove(&(*n)->left, *n, key)) { 577 (*n)->balance++; 578 if ((*n)->balance == 0) 579 deltaH = 1; 580 else if ((*n)->balance == 2) { 581 if ((*n)->right->balance == -1) 582 pf_tree_rotate_right(&(*n)->right); 583 pf_tree_rotate_left(n); 584 if ((*n)->balance == 0) 585 deltaH = 1; 586 } 587 } 588 } else if (c > 0) { 589 if (pf_tree_remove(&(*n)->right, *n, key)) { 590 (*n)->balance--; 591 if ((*n)->balance == 0) 592 deltaH = 1; 593 else if ((*n)->balance == -2) { 594 if ((*n)->left->balance == 1) 595 pf_tree_rotate_left(&(*n)->left); 596 pf_tree_rotate_right(n); 597 if ((*n)->balance == 0) 598 deltaH = 1; 599 } 600 } 601 } else { 602 if ((*n)->right == NULL) { 603 struct pf_tree_node *n0 = *n; 604 605 *n = (*n)->left; 606 if (*n != NULL) 607 (*n)->parent = p; 608 pool_put(&pf_tree_pl, n0); 609 deltaH = 1; 610 } else if ((*n)->left == NULL) { 611 struct pf_tree_node *n0 = *n; 612 613 *n = (*n)->right; 614 if (*n != NULL) 615 (*n)->parent = p; 616 pool_put(&pf_tree_pl, n0); 617 deltaH = 1; 618 } else { 619 struct pf_tree_node **qq = &(*n)->left; 620 621 while ((*qq)->right != NULL) 622 qq = &(*qq)->right; 623 bcopy(&(*qq)->key, &(*n)->key, 624 sizeof(struct pf_tree_key)); 625 (*n)->state = (*qq)->state; 626 bcopy(key, &(*qq)->key, sizeof(struct pf_tree_key)); 627 if (pf_tree_remove(&(*n)->left, *n, key)) { 628 (*n)->balance++; 629 if ((*n)->balance == 0) 630 deltaH = 1; 631 else if ((*n)->balance == 2) { 632 if ((*n)->right->balance == -1) 633 pf_tree_rotate_right( 634 &(*n)->right); 635 pf_tree_rotate_left(n); 636 if ((*n)->balance == 0) 637 deltaH = 1; 638 } 639 } 640 } 641 } 642 return (deltaH); 643 } 644 645 int 646 pflog_packet(struct ifnet *ifp, struct mbuf *m, int af, u_short dir, 647 u_short reason, struct pf_rule *rm) 648 { 649 #if NBPFILTER > 0 650 struct ifnet *ifn; 651 struct pfloghdr hdr; 652 struct mbuf m1; 653 654 if (ifp == NULL || m == NULL || rm == NULL) 655 return (-1); 656 657 hdr.af = htonl(af); 658 memcpy(hdr.ifname, ifp->if_xname, sizeof(hdr.ifname)); 659 660 hdr.rnr = htons(rm->nr); 661 hdr.reason = htons(reason); 662 hdr.dir = htons(dir); 663 hdr.action = htons(rm->action); 664 665 m1.m_next = m; 666 m1.m_len = PFLOG_HDRLEN; 667 m1.m_data = (char *) &hdr; 668 669 ifn = &(pflogif[0].sc_if); 670 671 if (ifn->if_bpf) 672 bpf_mtap(ifn->if_bpf, &m1); 673 #endif 674 675 return (0); 676 } 677 678 struct pf_tree_node * 679 pf_tree_first(struct pf_tree_node *n) 680 { 681 if (n == NULL) 682 return (NULL); 683 while (n->parent) 684 n = n->parent; 685 while (n->left) 686 n = n->left; 687 return (n); 688 } 689 690 struct pf_tree_node * 691 pf_tree_next(struct pf_tree_node *n) 692 { 693 if (n == NULL) 694 return (NULL); 695 if (n->right) { 696 n = n->right; 697 while (n->left) 698 n = n->left; 699 } else { 700 if (n->parent && (n == n->parent->left)) 701 n = n->parent; 702 else { 703 while (n->parent && (n == n->parent->right)) 704 n = n->parent; 705 n = n->parent; 706 } 707 } 708 return (n); 709 } 710 711 struct pf_tree_node * 712 pf_tree_search(struct pf_tree_node *n, struct pf_tree_key *key) 713 { 714 int c; 715 716 while (n && (c = pf_tree_key_compare(&n->key, key))) 717 if (c > 0) 718 n = n->left; 719 else 720 n = n->right; 721 pf_status.fcounters[FCNT_STATE_SEARCH]++; 722 return (n); 723 } 724 725 struct pf_state * 726 pf_find_state(struct pf_tree_node *n, struct pf_tree_key *key) 727 { 728 n = pf_tree_search(n, key); 729 if (n) 730 return (n->state); 731 else 732 return (NULL); 733 } 734 735 void 736 
pf_insert_state(struct pf_state *state)
{
	struct pf_tree_key key;
	struct pf_state *s;

	key.af = state->af;
	key.proto = state->proto;
	PF_ACPY(&key.addr[0], &state->lan.addr, state->af);
	key.port[0] = state->lan.port;
	PF_ACPY(&key.addr[1], &state->ext.addr, state->af);
	key.port[1] = state->ext.port;
	/* sanity checks can be removed later, should never occur */
	if ((s = pf_find_state(tree_lan_ext, &key)) != NULL) {
		if (pf_status.debug >= PF_DEBUG_URGENT) {
			printf("pf: ERROR! insert invalid\n");
			printf(" key already in tree_lan_ext\n");
			printf(" key: proto = %u, lan = ", state->proto);
			pf_print_host(&key.addr[0], key.port[0], key.af);
			printf(", ext = ");
			pf_print_host(&key.addr[1], key.port[1], key.af);
			printf("\n state: ");
			pf_print_state(s);
			printf("\n");
		}
	} else {
		pf_tree_insert(&tree_lan_ext, NULL, &key, state);
		if (pf_find_state(tree_lan_ext, &key) != state)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf: ERROR! insert failed\n"));
	}

	key.af = state->af;
	key.proto = state->proto;
	PF_ACPY(&key.addr[0], &state->ext.addr, state->af);
	key.port[0] = state->ext.port;
	PF_ACPY(&key.addr[1], &state->gwy.addr, state->af);
	key.port[1] = state->gwy.port;
	if ((s = pf_find_state(tree_ext_gwy, &key)) != NULL) {
		if (pf_status.debug >= PF_DEBUG_URGENT) {
			printf("pf: ERROR! insert invalid\n");
			printf(" key already in tree_ext_gwy\n");
			printf(" key: proto = %u, ext = ", state->proto);
			pf_print_host(&key.addr[0], key.port[0], key.af);
			printf(", gwy = ");
			pf_print_host(&key.addr[1], key.port[1], key.af);
			printf("\n state: ");
			pf_print_state(s);
			printf("\n");
		}
	} else {
		pf_tree_insert(&tree_ext_gwy, NULL, &key, state);
		if (pf_find_state(tree_ext_gwy, &key) != state)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf: ERROR! insert failed\n"));
	}
	pf_status.fcounters[FCNT_STATE_INSERT]++;
	pf_status.states++;
}

void
pf_purge_expired_states(void)
{
	struct pf_tree_node *cur, *next;
	struct pf_tree_key key;

	cur = pf_tree_first(tree_ext_gwy);
	while (cur != NULL) {
		if (cur->state->expire <= pftv.tv_sec) {
			key.af = cur->state->af;
			key.proto = cur->state->proto;
			PF_ACPY(&key.addr[0], &cur->state->lan.addr,
			    cur->state->af);
			key.port[0] = cur->state->lan.port;
			PF_ACPY(&key.addr[1], &cur->state->ext.addr,
			    cur->state->af);
			key.port[1] = cur->state->ext.port;
			/* remove state from second tree */
			if (pf_find_state(tree_lan_ext, &key) != cur->state)
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: ERROR: remove invalid!\n"));
			pf_tree_remove(&tree_lan_ext, NULL, &key);
			if (pf_find_state(tree_lan_ext, &key) != NULL)
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: ERROR: remove failed\n"));
			if (STATE_TRANSLATE(cur->state))
				pf_put_sport(cur->state->proto,
				    htons(cur->state->gwy.port));
			/* free state */
			pool_put(&pf_state_pl, cur->state);
			/*
			 * remove state from tree being traversed, use next
			 * state's key to search after removal, since removal
			 * can invalidate pointers.
829 */ 830 next = pf_tree_next(cur); 831 if (next) { 832 key = next->key; 833 pf_tree_remove(&tree_ext_gwy, NULL, &cur->key); 834 cur = pf_tree_search(tree_ext_gwy, &key); 835 if (cur == NULL) 836 DPFPRINTF(PF_DEBUG_URGENT, 837 ("pf: ERROR: next not found\n")); 838 } else { 839 pf_tree_remove(&tree_ext_gwy, NULL, &cur->key); 840 cur = NULL; 841 } 842 pf_status.fcounters[FCNT_STATE_REMOVALS]++; 843 pf_status.states--; 844 } else 845 cur = pf_tree_next(cur); 846 } 847 } 848 849 void 850 pf_print_host(struct pf_addr *addr, u_int16_t p, u_int8_t af) 851 { 852 switch(af) { 853 #ifdef INET 854 case AF_INET: { 855 u_int32_t a = ntohl(addr->addr32[0]); 856 p = ntohs(p); 857 printf("%u.%u.%u.%u:%u", (a>>24)&255, (a>>16)&255, 858 (a>>8)&255, a&255, p); 859 break; 860 } 861 #endif /* INET */ 862 #ifdef INET6 863 case AF_INET6: { 864 u_int16_t b; 865 u_int8_t i, curstart = 255, curend = 0, 866 maxstart = 0, maxend = 0; 867 for (i = 0; i < 8; i++) { 868 if (!addr->addr16[i]) { 869 if (curstart == 255) 870 curstart = i; 871 else 872 curend = i; 873 } else { 874 if (curstart) { 875 if ((curend - curstart) > 876 (maxend - maxstart)) { 877 maxstart = curstart; 878 maxend = curend; 879 curstart = 255; 880 } 881 } 882 } 883 } 884 for (i = 0; i < 8; i++) { 885 if (i >= maxstart && i <= maxend) { 886 if (maxend != 7) { 887 if (i == maxstart) 888 printf(":"); 889 } else { 890 if (i == maxend) 891 printf(":"); 892 } 893 } else { 894 b = ntohs(addr->addr16[i]); 895 printf("%x", b); 896 if (i < 7) 897 printf(":"); 898 } 899 } 900 p = ntohs(p); 901 printf("[%u]", p); 902 break; 903 } 904 #endif /* INET6 */ 905 } 906 } 907 908 void 909 pf_print_state(struct pf_state *s) 910 { 911 switch (s->proto) { 912 case IPPROTO_TCP: 913 printf("TCP "); 914 break; 915 case IPPROTO_UDP: 916 printf("UDP "); 917 break; 918 case IPPROTO_ICMP: 919 printf("ICMP "); 920 break; 921 default: 922 printf("%u ", s->proto); 923 break; 924 } 925 pf_print_host(&s->lan.addr, s->lan.port, s->af); 926 printf(" "); 927 pf_print_host(&s->gwy.addr, s->gwy.port, s->af); 928 printf(" "); 929 pf_print_host(&s->ext.addr, s->ext.port, s->af); 930 printf(" [lo=%lu high=%lu win=%u modulator=%u]", s->src.seqlo, 931 s->src.seqhi, s->src.max_win, s->src.seqdiff); 932 printf(" [lo=%lu high=%lu win=%u modulator=%u]", s->dst.seqlo, 933 s->dst.seqhi, s->dst.max_win, s->dst.seqdiff); 934 printf(" %u:%u", s->src.state, s->dst.state); 935 } 936 937 void 938 pf_print_flags(u_int8_t f) 939 { 940 if (f) 941 printf(" "); 942 if (f & TH_FIN) 943 printf("F"); 944 if (f & TH_SYN) 945 printf("S"); 946 if (f & TH_RST) 947 printf("R"); 948 if (f & TH_PUSH) 949 printf("P"); 950 if (f & TH_ACK) 951 printf("A"); 952 if (f & TH_URG) 953 printf("U"); 954 } 955 956 void 957 pfattach(int num) 958 { 959 /* XXX - no M_* tags, but they are not used anyway */ 960 pool_init(&pf_tree_pl, sizeof(struct pf_tree_node), 0, 0, 0, "pftrpl", 961 0, NULL, NULL, 0); 962 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl", 963 0, NULL, NULL, 0); 964 pool_init(&pf_nat_pl, sizeof(struct pf_nat), 0, 0, 0, "pfnatpl", 965 0, NULL, NULL, 0); 966 pool_init(&pf_binat_pl, sizeof(struct pf_binat), 0, 0, 0, "pfbinatpl", 967 0, NULL, NULL, 0); 968 pool_init(&pf_rdr_pl, sizeof(struct pf_rdr), 0, 0, 0, "pfrdrpl", 969 0, NULL, NULL, 0); 970 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl", 971 0, NULL, NULL, 0); 972 pool_init(&pf_sport_pl, sizeof(struct pf_port_node), 0, 0, 0, "pfsport", 973 0, NULL, NULL, 0); 974 975 TAILQ_INIT(&pf_rules[0]); 976 TAILQ_INIT(&pf_rules[1]); 977 
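	/*
	 * Descriptive note: each ruleset (rules, nats, binats, rdrs) is kept
	 * as a pair of queues.  One queue of each pair is "active" and used
	 * by the packet-filtering path; the other is "inactive" and filled
	 * via the DIOCBEGIN.../DIOCADD.../DIOCCOMMIT... ioctls, which swap
	 * the active/inactive pointers under splsoftnet() so a ruleset
	 * reload appears atomic to the forwarding path.
	 */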
TAILQ_INIT(&pf_nats[0]); 978 TAILQ_INIT(&pf_nats[1]); 979 TAILQ_INIT(&pf_binats[0]); 980 TAILQ_INIT(&pf_binats[1]); 981 TAILQ_INIT(&pf_rdrs[0]); 982 TAILQ_INIT(&pf_rdrs[1]); 983 pf_rules_active = &pf_rules[0]; 984 pf_rules_inactive = &pf_rules[1]; 985 pf_nats_active = &pf_nats[0]; 986 pf_nats_inactive = &pf_nats[1]; 987 pf_binats_active = &pf_binats[0]; 988 pf_binats_inactive = &pf_binats[1]; 989 pf_rdrs_active = &pf_rdrs[0]; 990 pf_rdrs_inactive = &pf_rdrs[1]; 991 992 LIST_INIT(&pf_tcp_ports); 993 LIST_INIT(&pf_udp_ports); 994 995 pf_normalize_init(); 996 } 997 998 int 999 pfopen(dev_t dev, int flags, int fmt, struct proc *p) 1000 { 1001 if (minor(dev) >= 1) 1002 return (ENXIO); 1003 return (0); 1004 } 1005 1006 int 1007 pfclose(dev_t dev, int flags, int fmt, struct proc *p) 1008 { 1009 if (minor(dev) >= 1) 1010 return (ENXIO); 1011 return (0); 1012 } 1013 1014 int 1015 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) 1016 { 1017 int error = 0; 1018 int s; 1019 1020 /* XXX keep in sync with switch() below */ 1021 if (securelevel > 1) 1022 switch (cmd) { 1023 case DIOCGETRULES: 1024 case DIOCGETRULE: 1025 case DIOCGETNATS: 1026 case DIOCGETNAT: 1027 case DIOCGETBINATS: 1028 case DIOCGETBINAT: 1029 case DIOCGETRDRS: 1030 case DIOCGETRDR: 1031 case DIOCGETSTATE: 1032 case DIOCSETSTATUSIF: 1033 case DIOCGETSTATUS: 1034 case DIOCCLRSTATUS: 1035 case DIOCNATLOOK: 1036 case DIOCSETDEBUG: 1037 case DIOCGETSTATES: 1038 case DIOCGETTIMEOUT: 1039 break; 1040 default: 1041 return EPERM; 1042 } 1043 1044 if (!(flags & FWRITE)) 1045 switch (cmd) { 1046 case DIOCGETRULES: 1047 case DIOCGETRULE: 1048 case DIOCGETNATS: 1049 case DIOCGETNAT: 1050 case DIOCGETRDRS: 1051 case DIOCGETRDR: 1052 case DIOCGETSTATE: 1053 case DIOCGETSTATUS: 1054 case DIOCGETSTATES: 1055 case DIOCGETTIMEOUT: 1056 case DIOCGETBINATS: 1057 case DIOCGETBINAT: 1058 break; 1059 default: 1060 return (EACCES); 1061 } 1062 1063 switch (cmd) { 1064 1065 case DIOCSTART: 1066 if (pf_status.running) 1067 error = EEXIST; 1068 else { 1069 u_int32_t states = pf_status.states; 1070 bzero(&pf_status, sizeof(struct pf_status)); 1071 pf_status.running = 1; 1072 pf_status.states = states; 1073 microtime(&pftv); 1074 pf_status.since = pftv.tv_sec; 1075 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 1076 } 1077 break; 1078 1079 case DIOCSTOP: 1080 if (!pf_status.running) 1081 error = ENOENT; 1082 else { 1083 pf_status.running = 0; 1084 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 1085 } 1086 break; 1087 1088 case DIOCBEGINRULES: { 1089 u_int32_t *ticket = (u_int32_t *)addr; 1090 struct pf_rule *rule; 1091 1092 while ((rule = TAILQ_FIRST(pf_rules_inactive)) != NULL) { 1093 TAILQ_REMOVE(pf_rules_inactive, rule, entries); 1094 pool_put(&pf_rule_pl, rule); 1095 } 1096 *ticket = ++ticket_rules_inactive; 1097 break; 1098 } 1099 1100 case DIOCADDRULE: { 1101 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1102 struct pf_rule *rule, *tail; 1103 1104 if (pr->ticket != ticket_rules_inactive) { 1105 error = EBUSY; 1106 break; 1107 } 1108 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1109 if (rule == NULL) { 1110 error = ENOMEM; 1111 break; 1112 } 1113 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1114 #ifndef INET 1115 if (rule->af == AF_INET) { 1116 pool_put(&pf_rule_pl, rule); 1117 error = EAFNOSUPPORT; 1118 break; 1119 } 1120 #endif /* INET */ 1121 #ifndef INET6 1122 if (rule->af == AF_INET6) { 1123 pool_put(&pf_rule_pl, rule); 1124 error = EAFNOSUPPORT; 1125 break; 1126 } 1127 #endif /* INET6 */ 1128 tail = TAILQ_LAST(pf_rules_inactive, 
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		rule->ifp = NULL;
		if (rule->ifname[0]) {
			rule->ifp = ifunit(rule->ifname);
			if (rule->ifp == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
		} else
			rule->ifp = NULL;
		if (rule->rt_ifname[0]) {
			rule->rt_ifp = ifunit(rule->rt_ifname);
			if (rule->rt_ifp == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
		} else
			rule->rt_ifp = NULL;
		rule->evaluations = rule->packets = rule->bytes = 0;
		TAILQ_INSERT_TAIL(pf_rules_inactive, rule, entries);
		break;
	}

	case DIOCCOMMITRULES: {
		u_int32_t *ticket = (u_int32_t *)addr;
		struct pf_rulequeue *old_rules;
		struct pf_rule *rule;
		struct pf_tree_node *n;

		if (*ticket != ticket_rules_inactive) {
			error = EBUSY;
			break;
		}

		/* Swap rules, keep the old. */
		s = splsoftnet();
		/*
		 * Rules are about to get freed, clear rule pointers in states
		 */
		for (n = pf_tree_first(tree_ext_gwy); n != NULL;
		    n = pf_tree_next(n))
			n->state->rule = NULL;
		old_rules = pf_rules_active;
		pf_rules_active = pf_rules_inactive;
		pf_rules_inactive = old_rules;
		ticket_rules_active = ticket_rules_inactive;
		pf_calc_skip_steps(pf_rules_active);
		splx(s);

		/* Purge the old rule list. */
		while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
			TAILQ_REMOVE(old_rules, rule, entries);
			pool_put(&pf_rule_pl, rule);
		}
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_rule *tail;

		s = splsoftnet();
		tail = TAILQ_LAST(pf_rules_active, pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ticket_rules_active;
		splx(s);
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_rule *rule;

		if (pr->ticket != ticket_rules_active) {
			error = EBUSY;
			break;
		}
		s = splsoftnet();
		rule = TAILQ_FIRST(pf_rules_active);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		splx(s);
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_changerule *pcr = (struct pfioc_changerule *)addr;
		struct pf_rule *oldrule = NULL, *newrule = NULL;
		u_int32_t nr = 0;

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->newrule, newrule, sizeof(struct pf_rule));
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newrule->ifp = NULL;
			if (newrule->ifname[0]) {
				newrule->ifp = ifunit(newrule->ifname);
				if (newrule->ifp == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
			}
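			/*
			 * As in DIOCADDRULE above, interface names are
			 * resolved to ifnet pointers up front, so the
			 * splsoftnet() section below only has to link a
			 * fully initialized rule into the active list.
			 */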
			newrule->rt_ifp = NULL;
			if (newrule->rt_ifname[0]) {
				newrule->rt_ifp = ifunit(newrule->rt_ifname);
				if (newrule->rt_ifp == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
			}
			newrule->evaluations = newrule->packets = 0;
			newrule->bytes = 0;
		}

		s = splsoftnet();

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(pf_rules_active);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(pf_rules_active, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(pf_rules_active);
			while ((oldrule != NULL) && pf_compare_rules(oldrule,
			    &pcr->oldrule))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				error = EINVAL;
				splx(s);
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			struct pf_tree_node *n;

			for (n = pf_tree_first(tree_ext_gwy); n != NULL;
			    n = pf_tree_next(n))
				if (n->state->rule == oldrule)
					n->state->rule = NULL;
			TAILQ_REMOVE(pf_rules_active, oldrule, entries);
			pool_put(&pf_rule_pl, oldrule);
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(pf_rules_active, newrule,
				    entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(pf_rules_active, oldrule,
				    newrule, entries);
		}

		TAILQ_FOREACH(oldrule, pf_rules_active, entries)
			oldrule->nr = nr++;

		pf_calc_skip_steps(pf_rules_active);

		ticket_rules_active++;
		splx(s);
		break;
	}

	case DIOCBEGINNATS: {
		u_int32_t *ticket = (u_int32_t *)addr;
		struct pf_nat *nat;

		while ((nat = TAILQ_FIRST(pf_nats_inactive)) != NULL) {
			TAILQ_REMOVE(pf_nats_inactive, nat, entries);
			pool_put(&pf_nat_pl, nat);
		}
		*ticket = ++ticket_nats_inactive;
		break;
	}

	case DIOCADDNAT: {
		struct pfioc_nat *pn = (struct pfioc_nat *)addr;
		struct pf_nat *nat;

		if (pn->ticket != ticket_nats_inactive) {
			error = EBUSY;
			break;
		}
		nat = pool_get(&pf_nat_pl, PR_NOWAIT);
		if (nat == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pn->nat, nat, sizeof(struct pf_nat));
#ifndef INET
		if (nat->af == AF_INET) {
			pool_put(&pf_nat_pl, nat);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (nat->af == AF_INET6) {
			pool_put(&pf_nat_pl, nat);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (nat->ifname[0]) {
			nat->ifp = ifunit(nat->ifname);
			if (nat->ifp == NULL) {
				pool_put(&pf_nat_pl, nat);
				error = EINVAL;
				break;
			}
		} else
			nat->ifp = NULL;
		TAILQ_INSERT_TAIL(pf_nats_inactive, nat, entries);
		break;
	}

	case DIOCCOMMITNATS: {
		u_int32_t *ticket = (u_int32_t *)addr;
		struct pf_natqueue *old_nats;
		struct pf_nat *nat;

		if (*ticket != ticket_nats_inactive) {
			error = EBUSY;
			break;
		}

		/* Swap nats, keep the old.
*/ 1395 s = splsoftnet(); 1396 old_nats = pf_nats_active; 1397 pf_nats_active = pf_nats_inactive; 1398 pf_nats_inactive = old_nats; 1399 ticket_nats_active = ticket_nats_inactive; 1400 splx(s); 1401 1402 /* Purge the old nat list */ 1403 while ((nat = TAILQ_FIRST(old_nats)) != NULL) { 1404 TAILQ_REMOVE(old_nats, nat, entries); 1405 pool_put(&pf_nat_pl, nat); 1406 } 1407 break; 1408 } 1409 1410 case DIOCGETNATS: { 1411 struct pfioc_nat *pn = (struct pfioc_nat *)addr; 1412 struct pf_nat *nat; 1413 1414 pn->nr = 0; 1415 s = splsoftnet(); 1416 TAILQ_FOREACH(nat, pf_nats_active, entries) 1417 pn->nr++; 1418 pn->ticket = ticket_nats_active; 1419 splx(s); 1420 break; 1421 } 1422 1423 case DIOCGETNAT: { 1424 struct pfioc_nat *pn = (struct pfioc_nat *)addr; 1425 struct pf_nat *nat; 1426 u_int32_t nr; 1427 1428 if (pn->ticket != ticket_nats_active) { 1429 error = EBUSY; 1430 break; 1431 } 1432 nr = 0; 1433 s = splsoftnet(); 1434 nat = TAILQ_FIRST(pf_nats_active); 1435 while ((nat != NULL) && (nr < pn->nr)) { 1436 nat = TAILQ_NEXT(nat, entries); 1437 nr++; 1438 } 1439 if (nat == NULL) { 1440 error = EBUSY; 1441 splx(s); 1442 break; 1443 } 1444 bcopy(nat, &pn->nat, sizeof(struct pf_nat)); 1445 splx(s); 1446 break; 1447 } 1448 1449 case DIOCCHANGENAT: { 1450 struct pfioc_changenat *pcn = (struct pfioc_changenat *)addr; 1451 struct pf_nat *oldnat = NULL, *newnat = NULL; 1452 1453 if (pcn->action < PF_CHANGE_ADD_HEAD || 1454 pcn->action > PF_CHANGE_REMOVE) { 1455 error = EINVAL; 1456 break; 1457 } 1458 1459 if (pcn->action != PF_CHANGE_REMOVE) { 1460 newnat = pool_get(&pf_nat_pl, PR_NOWAIT); 1461 if (newnat == NULL) { 1462 error = ENOMEM; 1463 break; 1464 } 1465 bcopy(&pcn->newnat, newnat, sizeof(struct pf_nat)); 1466 #ifndef INET 1467 if (newnat->af == AF_INET) { 1468 pool_put(&pf_nat_pl, newnat); 1469 error = EAFNOSUPPORT; 1470 break; 1471 } 1472 #endif /* INET */ 1473 #ifndef INET6 1474 if (newnat->af == AF_INET6) { 1475 pool_put(&pf_nat_pl, newnat); 1476 error = EAFNOSUPPORT; 1477 break; 1478 } 1479 #endif /* INET6 */ 1480 newnat->ifp = NULL; 1481 if (newnat->ifname[0]) { 1482 newnat->ifp = ifunit(newnat->ifname); 1483 if (newnat->ifp == NULL) { 1484 pool_put(&pf_nat_pl, newnat); 1485 error = EINVAL; 1486 break; 1487 } 1488 } 1489 } 1490 1491 s = splsoftnet(); 1492 1493 if (pcn->action == PF_CHANGE_ADD_HEAD) 1494 oldnat = TAILQ_FIRST(pf_nats_active); 1495 else if (pcn->action == PF_CHANGE_ADD_TAIL) 1496 oldnat = TAILQ_LAST(pf_nats_active, pf_natqueue); 1497 else { 1498 oldnat = TAILQ_FIRST(pf_nats_active); 1499 while ((oldnat != NULL) && pf_compare_nats(oldnat, 1500 &pcn->oldnat)) 1501 oldnat = TAILQ_NEXT(oldnat, entries); 1502 if (oldnat == NULL) { 1503 error = EINVAL; 1504 splx(s); 1505 break; 1506 } 1507 } 1508 1509 if (pcn->action == PF_CHANGE_REMOVE) { 1510 TAILQ_REMOVE(pf_nats_active, oldnat, entries); 1511 pool_put(&pf_nat_pl, oldnat); 1512 } else { 1513 if (oldnat == NULL) 1514 TAILQ_INSERT_TAIL(pf_nats_active, newnat, 1515 entries); 1516 else if (pcn->action == PF_CHANGE_ADD_HEAD || 1517 pcn->action == PF_CHANGE_ADD_BEFORE) 1518 TAILQ_INSERT_BEFORE(oldnat, newnat, entries); 1519 else 1520 TAILQ_INSERT_AFTER(pf_nats_active, oldnat, 1521 newnat, entries); 1522 } 1523 1524 ticket_nats_active++; 1525 splx(s); 1526 break; 1527 } 1528 1529 case DIOCBEGINBINATS: { 1530 u_int32_t *ticket = (u_int32_t *)addr; 1531 struct pf_binat *binat; 1532 1533 while ((binat = TAILQ_FIRST(pf_binats_inactive)) != NULL) { 1534 TAILQ_REMOVE(pf_binats_inactive, binat, entries); 1535 pool_put(&pf_binat_pl, binat); 1536 
} 1537 *ticket = ++ticket_binats_inactive; 1538 break; 1539 } 1540 1541 case DIOCADDBINAT: { 1542 struct pfioc_binat *pb = (struct pfioc_binat *)addr; 1543 struct pf_binat *binat; 1544 1545 if (pb->ticket != ticket_binats_inactive) { 1546 error = EBUSY; 1547 break; 1548 } 1549 binat = pool_get(&pf_binat_pl, PR_NOWAIT); 1550 if (binat == NULL) { 1551 error = ENOMEM; 1552 break; 1553 } 1554 bcopy(&pb->binat, binat, sizeof(struct pf_binat)); 1555 #ifndef INET 1556 if (binat->af == AF_INET) { 1557 pool_put(&pf_binat_pl, binat); 1558 error = EAFNOSUPPORT; 1559 break; 1560 } 1561 #endif /* INET */ 1562 #ifndef INET6 1563 if (binat->af == AF_INET6) { 1564 pool_put(&pf_binat_pl, binat); 1565 error = EAFNOSUPPORT; 1566 break; 1567 } 1568 #endif /* INET6 */ 1569 if (binat->ifname[0]) { 1570 binat->ifp = ifunit(binat->ifname); 1571 if (binat->ifp == NULL) { 1572 pool_put(&pf_binat_pl, binat); 1573 error = EINVAL; 1574 break; 1575 } 1576 } else 1577 binat->ifp = NULL; 1578 TAILQ_INSERT_TAIL(pf_binats_inactive, binat, entries); 1579 break; 1580 } 1581 1582 case DIOCCOMMITBINATS: { 1583 u_int32_t *ticket = (u_int32_t *)addr; 1584 struct pf_binatqueue *old_binats; 1585 struct pf_binat *binat; 1586 1587 if (*ticket != ticket_binats_inactive) { 1588 error = EBUSY; 1589 break; 1590 } 1591 1592 /* Swap binats, keep the old. */ 1593 s = splsoftnet(); 1594 old_binats = pf_binats_active; 1595 pf_binats_active = pf_binats_inactive; 1596 pf_binats_inactive = old_binats; 1597 ticket_binats_active = ticket_binats_inactive; 1598 splx(s); 1599 1600 /* Purge the old binat list */ 1601 while ((binat = TAILQ_FIRST(old_binats)) != NULL) { 1602 TAILQ_REMOVE(old_binats, binat, entries); 1603 pool_put(&pf_binat_pl, binat); 1604 } 1605 break; 1606 } 1607 1608 case DIOCGETBINATS: { 1609 struct pfioc_binat *pb = (struct pfioc_binat *)addr; 1610 struct pf_binat *binat; 1611 1612 pb->nr = 0; 1613 s = splsoftnet(); 1614 TAILQ_FOREACH(binat, pf_binats_active, entries) 1615 pb->nr++; 1616 pb->ticket = ticket_binats_active; 1617 splx(s); 1618 break; 1619 } 1620 1621 case DIOCGETBINAT: { 1622 struct pfioc_binat *pb = (struct pfioc_binat *)addr; 1623 struct pf_binat *binat; 1624 u_int32_t nr; 1625 1626 if (pb->ticket != ticket_binats_active) { 1627 error = EBUSY; 1628 break; 1629 } 1630 nr = 0; 1631 s = splsoftnet(); 1632 binat = TAILQ_FIRST(pf_binats_active); 1633 while ((binat != NULL) && (nr < pb->nr)) { 1634 binat = TAILQ_NEXT(binat, entries); 1635 nr++; 1636 } 1637 if (binat == NULL) { 1638 error = EBUSY; 1639 splx(s); 1640 break; 1641 } 1642 bcopy(binat, &pb->binat, sizeof(struct pf_binat)); 1643 splx(s); 1644 break; 1645 } 1646 1647 case DIOCCHANGEBINAT: { 1648 struct pfioc_changebinat *pcn = (struct pfioc_changebinat *)addr; 1649 struct pf_binat *oldbinat = NULL, *newbinat = NULL; 1650 1651 if (pcn->action < PF_CHANGE_ADD_HEAD || 1652 pcn->action > PF_CHANGE_REMOVE) { 1653 error = EINVAL; 1654 break; 1655 } 1656 1657 if (pcn->action != PF_CHANGE_REMOVE) { 1658 newbinat = pool_get(&pf_binat_pl, PR_NOWAIT); 1659 if (newbinat == NULL) { 1660 error = ENOMEM; 1661 break; 1662 } 1663 bcopy(&pcn->newbinat, newbinat, 1664 sizeof(struct pf_binat)); 1665 #ifndef INET 1666 if (newbinat->af == AF_INET) { 1667 pool_put(&pf_binat_pl, newbinat); 1668 error = EAFNOSUPPORT; 1669 break; 1670 } 1671 #endif /* INET */ 1672 #ifndef INET6 1673 if (newbinat->af == AF_INET6) { 1674 pool_put(&pf_binat_pl, newbinat); 1675 error = EAFNOSUPPORT; 1676 break; 1677 } 1678 #endif /* INET6 */ 1679 newbinat->ifp = NULL; 1680 if (newbinat->ifname[0]) { 1681 
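				/*
				 * Resolve the interface name to a live ifnet;
				 * if it does not exist, the change is
				 * rejected with EINVAL before the active
				 * binat list is modified.
				 */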
newbinat->ifp = ifunit(newbinat->ifname); 1682 if (newbinat->ifp == NULL) { 1683 pool_put(&pf_binat_pl, newbinat); 1684 error = EINVAL; 1685 break; 1686 } 1687 } 1688 } 1689 1690 s = splsoftnet(); 1691 1692 if (pcn->action == PF_CHANGE_ADD_HEAD) 1693 oldbinat = TAILQ_FIRST(pf_binats_active); 1694 else if (pcn->action == PF_CHANGE_ADD_TAIL) 1695 oldbinat = TAILQ_LAST(pf_binats_active, pf_binatqueue); 1696 else { 1697 oldbinat = TAILQ_FIRST(pf_binats_active); 1698 while ((oldbinat != NULL) && pf_compare_binats(oldbinat, 1699 &pcn->oldbinat)) 1700 oldbinat = TAILQ_NEXT(oldbinat, entries); 1701 if (oldbinat == NULL) { 1702 error = EINVAL; 1703 splx(s); 1704 break; 1705 } 1706 } 1707 1708 if (pcn->action == PF_CHANGE_REMOVE) { 1709 TAILQ_REMOVE(pf_binats_active, oldbinat, entries); 1710 pool_put(&pf_binat_pl, oldbinat); 1711 } else { 1712 if (oldbinat == NULL) 1713 TAILQ_INSERT_TAIL(pf_binats_active, newbinat, 1714 entries); 1715 else if (pcn->action == PF_CHANGE_ADD_HEAD || 1716 pcn->action == PF_CHANGE_ADD_BEFORE) 1717 TAILQ_INSERT_BEFORE(oldbinat, newbinat, 1718 entries); 1719 else 1720 TAILQ_INSERT_AFTER(pf_binats_active, oldbinat, 1721 newbinat, entries); 1722 } 1723 1724 ticket_binats_active++; 1725 splx(s); 1726 break; 1727 } 1728 1729 case DIOCBEGINRDRS: { 1730 u_int32_t *ticket = (u_int32_t *)addr; 1731 struct pf_rdr *rdr; 1732 1733 while ((rdr = TAILQ_FIRST(pf_rdrs_inactive)) != NULL) { 1734 TAILQ_REMOVE(pf_rdrs_inactive, rdr, entries); 1735 pool_put(&pf_rdr_pl, rdr); 1736 } 1737 *ticket = ++ticket_rdrs_inactive; 1738 break; 1739 } 1740 1741 case DIOCADDRDR: { 1742 struct pfioc_rdr *pr = (struct pfioc_rdr *)addr; 1743 struct pf_rdr *rdr; 1744 1745 if (pr->ticket != ticket_rdrs_inactive) { 1746 error = EBUSY; 1747 break; 1748 } 1749 rdr = pool_get(&pf_rdr_pl, PR_NOWAIT); 1750 if (rdr == NULL) { 1751 error = ENOMEM; 1752 break; 1753 } 1754 bcopy(&pr->rdr, rdr, sizeof(struct pf_rdr)); 1755 #ifndef INET 1756 if (rdr->af == AF_INET) { 1757 pool_put(&pf_rdr_pl, rdr); 1758 error = EAFNOSUPPORT; 1759 break; 1760 } 1761 #endif /* INET */ 1762 #ifndef INET6 1763 if (rdr->af == AF_INET6) { 1764 pool_put(&pf_rdr_pl, rdr); 1765 error = EAFNOSUPPORT; 1766 break; 1767 } 1768 #endif /* INET6 */ 1769 if (rdr->ifname[0]) { 1770 rdr->ifp = ifunit(rdr->ifname); 1771 if (rdr->ifp == NULL) { 1772 pool_put(&pf_rdr_pl, rdr); 1773 error = EINVAL; 1774 break; 1775 } 1776 } else 1777 rdr->ifp = NULL; 1778 TAILQ_INSERT_TAIL(pf_rdrs_inactive, rdr, entries); 1779 break; 1780 } 1781 1782 case DIOCCOMMITRDRS: { 1783 u_int32_t *ticket = (u_int32_t *)addr; 1784 struct pf_rdrqueue *old_rdrs; 1785 struct pf_rdr *rdr; 1786 1787 if (*ticket != ticket_rdrs_inactive) { 1788 error = EBUSY; 1789 break; 1790 } 1791 1792 /* Swap rdrs, keep the old. 
*/ 1793 s = splsoftnet(); 1794 old_rdrs = pf_rdrs_active; 1795 pf_rdrs_active = pf_rdrs_inactive; 1796 pf_rdrs_inactive = old_rdrs; 1797 ticket_rdrs_active = ticket_rdrs_inactive; 1798 splx(s); 1799 1800 /* Purge the old rdr list */ 1801 while ((rdr = TAILQ_FIRST(old_rdrs)) != NULL) { 1802 TAILQ_REMOVE(old_rdrs, rdr, entries); 1803 pool_put(&pf_rdr_pl, rdr); 1804 } 1805 break; 1806 } 1807 1808 case DIOCGETRDRS: { 1809 struct pfioc_rdr *pr = (struct pfioc_rdr *)addr; 1810 struct pf_rdr *rdr; 1811 1812 pr->nr = 0; 1813 s = splsoftnet(); 1814 TAILQ_FOREACH(rdr, pf_rdrs_active, entries) 1815 pr->nr++; 1816 pr->ticket = ticket_rdrs_active; 1817 splx(s); 1818 break; 1819 } 1820 1821 case DIOCGETRDR: { 1822 struct pfioc_rdr *pr = (struct pfioc_rdr *)addr; 1823 struct pf_rdr *rdr; 1824 u_int32_t nr; 1825 1826 if (pr->ticket != ticket_rdrs_active) { 1827 error = EBUSY; 1828 break; 1829 } 1830 nr = 0; 1831 s = splsoftnet(); 1832 rdr = TAILQ_FIRST(pf_rdrs_active); 1833 while ((rdr != NULL) && (nr < pr->nr)) { 1834 rdr = TAILQ_NEXT(rdr, entries); 1835 nr++; 1836 } 1837 if (rdr == NULL) { 1838 error = EBUSY; 1839 splx(s); 1840 break; 1841 } 1842 bcopy(rdr, &pr->rdr, sizeof(struct pf_rdr)); 1843 splx(s); 1844 break; 1845 } 1846 1847 case DIOCCHANGERDR: { 1848 struct pfioc_changerdr *pcn = (struct pfioc_changerdr *)addr; 1849 struct pf_rdr *oldrdr = NULL, *newrdr = NULL; 1850 1851 if (pcn->action < PF_CHANGE_ADD_HEAD || 1852 pcn->action > PF_CHANGE_REMOVE) { 1853 error = EINVAL; 1854 break; 1855 } 1856 1857 if (pcn->action != PF_CHANGE_REMOVE) { 1858 newrdr = pool_get(&pf_rdr_pl, PR_NOWAIT); 1859 if (newrdr == NULL) { 1860 error = ENOMEM; 1861 break; 1862 } 1863 bcopy(&pcn->newrdr, newrdr, sizeof(struct pf_rdr)); 1864 #ifndef INET 1865 if (newrdr->af == AF_INET) { 1866 pool_put(&pf_rdr_pl, newrdr); 1867 error = EAFNOSUPPORT; 1868 break; 1869 } 1870 #endif /* INET */ 1871 #ifndef INET6 1872 if (newrdr->af == AF_INET6) { 1873 pool_put(&pf_rdr_pl, newrdr); 1874 error = EAFNOSUPPORT; 1875 break; 1876 } 1877 #endif /* INET6 */ 1878 newrdr->ifp = NULL; 1879 if (newrdr->ifname[0]) { 1880 newrdr->ifp = ifunit(newrdr->ifname); 1881 if (newrdr->ifp == NULL) { 1882 pool_put(&pf_rdr_pl, newrdr); 1883 error = EINVAL; 1884 break; 1885 } 1886 } 1887 } 1888 1889 s = splsoftnet(); 1890 1891 if (pcn->action == PF_CHANGE_ADD_HEAD) 1892 oldrdr = TAILQ_FIRST(pf_rdrs_active); 1893 else if (pcn->action == PF_CHANGE_ADD_TAIL) 1894 oldrdr = TAILQ_LAST(pf_rdrs_active, pf_rdrqueue); 1895 else { 1896 oldrdr = TAILQ_FIRST(pf_rdrs_active); 1897 while ((oldrdr != NULL) && pf_compare_rdrs(oldrdr, 1898 &pcn->oldrdr)) 1899 oldrdr = TAILQ_NEXT(oldrdr, entries); 1900 if (oldrdr == NULL) { 1901 error = EINVAL; 1902 splx(s); 1903 break; 1904 } 1905 } 1906 1907 if (pcn->action == PF_CHANGE_REMOVE) { 1908 TAILQ_REMOVE(pf_rdrs_active, oldrdr, entries); 1909 pool_put(&pf_rdr_pl, oldrdr); 1910 } else { 1911 if (oldrdr == NULL) 1912 TAILQ_INSERT_TAIL(pf_rdrs_active, newrdr, 1913 entries); 1914 else if (pcn->action == PF_CHANGE_ADD_HEAD || 1915 pcn->action == PF_CHANGE_ADD_BEFORE) 1916 TAILQ_INSERT_BEFORE(oldrdr, newrdr, entries); 1917 else 1918 TAILQ_INSERT_AFTER(pf_rdrs_active, oldrdr, 1919 newrdr, entries); 1920 } 1921 1922 ticket_rdrs_active++; 1923 splx(s); 1924 break; 1925 } 1926 1927 case DIOCCLRSTATES: { 1928 struct pf_tree_node *n; 1929 1930 s = splsoftnet(); 1931 for (n = pf_tree_first(tree_ext_gwy); n != NULL; 1932 n = pf_tree_next(n)) 1933 n->state->expire = 0; 1934 pf_purge_expired_states(); 1935 pf_status.states = 0; 1936 splx(s); 
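		/*
		 * Note: states are cleared by marking every entry as already
		 * expired and reusing the regular purge path above, which
		 * keeps both lookup trees (tree_lan_ext and tree_ext_gwy)
		 * consistent while entries are freed.
		 */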
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state *ps = (struct pfioc_state *)addr;
		struct pf_state *state;

		state = pool_get(&pf_state_pl, PR_NOWAIT);
		if (state == NULL) {
			error = ENOMEM;
			break;
		}
		s = splsoftnet();
		microtime(&pftv);
		bcopy(&ps->state, state, sizeof(struct pf_state));
		state->rule = NULL;
		state->creation = pftv.tv_sec;
		state->expire += pftv.tv_sec;
		state->packets = 0;
		state->bytes = 0;
		pf_insert_state(state);
		splx(s);
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state *ps = (struct pfioc_state *)addr;
		struct pf_tree_node *n;
		u_int32_t nr;

		nr = 0;
		s = splsoftnet();
		n = pf_tree_first(tree_ext_gwy);
		while ((n != NULL) && (nr < ps->nr)) {
			n = pf_tree_next(n);
			nr++;
		}
		if (n == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		bcopy(n->state, &ps->state, sizeof(struct pf_state));
		splx(s);
		microtime(&pftv);
		ps->state.creation = pftv.tv_sec - ps->state.creation;
		if (ps->state.expire <= pftv.tv_sec)
			ps->state.expire = 0;
		else
			ps->state.expire -= pftv.tv_sec;
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states *ps = (struct pfioc_states *)addr;
		struct pf_tree_node *n;
		struct pf_state *p, pstore;
		u_int32_t nr = 0;
		int space = ps->ps_len;

		if (space == 0) {
			s = splsoftnet();
			n = pf_tree_first(tree_ext_gwy);
			while (n != NULL) {
				n = pf_tree_next(n);
				nr++;
			}
			splx(s);
			ps->ps_len = sizeof(struct pf_state) * nr;
			return (0);
		}

		microtime(&pftv);
		s = splsoftnet();
		p = ps->ps_states;
		n = pf_tree_first(tree_ext_gwy);
		while (n && (nr + 1) * sizeof(*p) <= ps->ps_len) {
			bcopy(n->state, &pstore, sizeof(pstore));
			pstore.creation = pftv.tv_sec - pstore.creation;
			if (pstore.expire <= pftv.tv_sec)
				pstore.expire = 0;
			else
				pstore.expire -= pftv.tv_sec;
			error = copyout(&pstore, p, sizeof(*p));
			if (error) {
				splx(s);
				goto fail;
			}
			p++;
			nr++;
			n = pf_tree_next(n);
		}
		ps->ps_len = sizeof(struct pf_state) * nr;
		splx(s);
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if *pi = (struct pfioc_if *)addr;
		struct ifnet *ifp;

		if ((ifp = ifunit(pi->ifname)) == NULL)
			error = EINVAL;
		else
			status_ifp = ifp;
		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		bcopy(&pf_status, s, sizeof(struct pf_status));
		break;
	}

	case DIOCCLRSTATUS: {
		u_int32_t running = pf_status.running;
		u_int32_t states = pf_status.states;
		u_int32_t since = pf_status.since;
		u_int32_t debug = pf_status.debug;

		bzero(&pf_status, sizeof(struct pf_status));
		pf_status.running = running;
		pf_status.states = states;
		pf_status.since = since;
		pf_status.debug = debug;
		break;
	}

	case DIOCNATLOOK: {
		struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
		struct pf_state *st;
		struct pf_tree_key key;
		int direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;

		/*
		 * userland gives us source and dest of connection, reverse
		 * the lookup so we ask for what happens with the return
		 * traffic, enabling us to find it in the state tree.
		 */
		PF_ACPY(&key.addr[1], &pnl->saddr, pnl->af);
		key.port[1] = pnl->sport;
		PF_ACPY(&key.addr[0], &pnl->daddr, pnl->af);
		key.port[0] = pnl->dport;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    !pnl->dport || !pnl->sport)
			error = EINVAL;
		else {
			s = splsoftnet();
			if (direction == PF_IN)
				st = pf_find_state(tree_ext_gwy, &key);
			else
				st = pf_find_state(tree_lan_ext, &key);
			if (st != NULL) {
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &st->lan.addr,
					    st->af);
					pnl->rsport = st->lan.port;
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					pnl->rdport = pnl->dport;
				} else {
					PF_ACPY(&pnl->rdaddr, &st->gwy.addr,
					    st->af);
					pnl->rdport = st->gwy.port;
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					pnl->rsport = pnl->sport;
				}
			} else
				error = ENOENT;
			splx(s);
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;
		int old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		old = *pftm_timeouts[pt->timeout];
		*pftm_timeouts[pt->timeout] = pt->seconds;
		pt->seconds = old;
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = *pftm_timeouts[pt->timeout];
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t *level = (u_int32_t *)addr;
		pf_status.debug = *level;
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:

	return (error);
}

#define PF_CALC_SKIP_STEP(i, c) \
	do { \
		if (a & 1 << i) { \
			if (c) \
				r->skip[i] = TAILQ_NEXT(s, entries); \
			else \
				a ^= 1 << i; \
		} \
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *r, *s;
	int a, i;

	r = TAILQ_FIRST(rules);
	while (r != NULL) {
		a = 0;
		for (i = 0; i < PF_SKIP_COUNT; ++i) {
			a |= 1 << i;
			r->skip[i] = TAILQ_NEXT(r, entries);
		}
		s = TAILQ_NEXT(r, entries);
		while (a && s != NULL) {
			PF_CALC_SKIP_STEP(PF_SKIP_IFP, s->ifp == r->ifp);
			PF_CALC_SKIP_STEP(PF_SKIP_AF, s->af == r->af);
			PF_CALC_SKIP_STEP(PF_SKIP_PROTO, s->proto == r->proto);
			PF_CALC_SKIP_STEP(PF_SKIP_SRC_ADDR,
			    PF_AEQ(&s->src.addr, &r->src.addr, r->af) &&
			    PF_AEQ(&s->src.mask, &r->src.mask, r->af) &&
			    s->src.not == r->src.not);
			PF_CALC_SKIP_STEP(PF_SKIP_SRC_PORT,
			    s->src.port[0] == r->src.port[0] &&
			    s->src.port[1] == r->src.port[1] &&
			    s->src.port_op == r->src.port_op);
			PF_CALC_SKIP_STEP(PF_SKIP_DST_ADDR,
			    PF_AEQ(&s->dst.addr, &r->dst.addr, r->af) &&
			    PF_AEQ(&s->dst.mask, &r->dst.mask, r->af) &&
			    s->dst.not == r->dst.not);
			PF_CALC_SKIP_STEP(PF_SKIP_DST_PORT,
			    s->dst.port[0] == r->dst.port[0] &&
			    s->dst.port[1] == r->dst.port[1] &&
			    s->dst.port_op == r->dst.port_op);
			s = TAILQ_NEXT(s, entries);
		}
		r = TAILQ_NEXT(r, entries);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t l;

	if (udp && !cksum)
		return 0x0000;
	l = cksum + old - new;
	l = (l >> 16) + (l &
65535); 2217 l = l & 65535; 2218 if (udp && !l) 2219 return 0xFFFF; 2220 return (l); 2221 } 2222 2223 void 2224 pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc, 2225 struct pf_addr *an, u_int16_t pn, u_int8_t u, int af) 2226 { 2227 struct pf_addr ao; 2228 u_int16_t po = *p; 2229 2230 PF_ACPY(&ao, a, af); 2231 PF_ACPY(a, an, af); 2232 2233 *p = pn; 2234 2235 switch (af) { 2236 #ifdef INET 2237 case AF_INET: 2238 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, 2239 ao.addr16[0], an->addr16[0], 0), 2240 ao.addr16[1], an->addr16[1], 0); 2241 *p = pn; 2242 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, 2243 ao.addr16[0], an->addr16[0], u), 2244 ao.addr16[1], an->addr16[1], u), 2245 po, pn, u); 2246 break; 2247 #endif /* INET */ 2248 #ifdef INET6 2249 case AF_INET6: 2250 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2251 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2252 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, 2253 ao.addr16[0], an->addr16[0], u), 2254 ao.addr16[1], an->addr16[1], u), 2255 ao.addr16[2], an->addr16[2], u), 2256 ao.addr16[3], an->addr16[3], u), 2257 ao.addr16[4], an->addr16[4], u), 2258 ao.addr16[5], an->addr16[5], u), 2259 ao.addr16[6], an->addr16[6], u), 2260 ao.addr16[7], an->addr16[7], u), 2261 po, pn, u); 2262 break; 2263 #endif /* INET6 */ 2264 } 2265 } 2266 2267 void 2268 pf_change_a(u_int32_t *a, u_int16_t *c, u_int32_t an, u_int8_t u) 2269 { 2270 u_int32_t ao = *a; 2271 2272 *a = an; 2273 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u), 2274 ao % 65536, an % 65536, u); 2275 } 2276 2277 #ifdef INET6 2278 void 2279 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u) 2280 { 2281 struct pf_addr ao; 2282 2283 PF_ACPY(&ao, a, AF_INET6); 2284 PF_ACPY(a, an, AF_INET6); 2285 2286 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2287 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2288 pf_cksum_fixup(pf_cksum_fixup(*c, 2289 ao.addr16[0], an->addr16[0], u), 2290 ao.addr16[1], an->addr16[1], u), 2291 ao.addr16[2], an->addr16[2], u), 2292 ao.addr16[3], an->addr16[3], u), 2293 ao.addr16[4], an->addr16[4], u), 2294 ao.addr16[5], an->addr16[5], u), 2295 ao.addr16[6], an->addr16[6], u), 2296 ao.addr16[7], an->addr16[7], u); 2297 } 2298 #endif /* INET6 */ 2299 2300 void 2301 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, 2302 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c, 2303 u_int16_t *ic, u_int16_t *hc, u_int8_t u, int af) 2304 { 2305 struct pf_addr oia, ooa; 2306 u_int32_t opc, oh2c = *h2c; 2307 u_int16_t oip = *ip; 2308 2309 PF_ACPY(&oia, ia, af); 2310 PF_ACPY(&ooa, oa, af); 2311 2312 if (pc != NULL) 2313 opc = *pc; 2314 /* Change inner protocol port, fix inner protocol checksum. */ 2315 *ip = np; 2316 if (pc != NULL) 2317 *pc = pf_cksum_fixup(*pc, oip, *ip, u); 2318 *ic = pf_cksum_fixup(*ic, oip, *ip, 0); 2319 if (pc != NULL) 2320 *ic = pf_cksum_fixup(*ic, opc, *pc, 0); 2321 PF_ACPY(ia, na, af); 2322 /* Change inner ip address, fix inner ipv4 checksum and icmp checksum. 
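	 * The inner header lives in the ICMP payload and is therefore itself
	 * covered by the ICMP checksum, which is why each inner fixup here is
	 * also folded into *ic (the oip, opc and oh2c adjustments).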
*/ 2323 switch (af) { 2324 #ifdef INET 2325 case AF_INET: 2326 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c, 2327 oia.addr16[0], ia->addr16[0], 0), 2328 oia.addr16[1], ia->addr16[1], 0); 2329 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, 2330 oia.addr16[0], ia->addr16[0], 0), 2331 oia.addr16[1], ia->addr16[1], 0); 2332 break; 2333 #endif /* INET */ 2334 #ifdef INET6 2335 case AF_INET6: 2336 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2337 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2338 pf_cksum_fixup(pf_cksum_fixup(*ic, 2339 oia.addr16[0], ia->addr16[0], u), 2340 oia.addr16[1], ia->addr16[1], u), 2341 oia.addr16[2], ia->addr16[2], u), 2342 oia.addr16[3], ia->addr16[3], u), 2343 oia.addr16[4], ia->addr16[4], u), 2344 oia.addr16[5], ia->addr16[5], u), 2345 oia.addr16[6], ia->addr16[6], u), 2346 oia.addr16[7], ia->addr16[7], u); 2347 break; 2348 #endif /* INET6 */ 2349 } 2350 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0); 2351 /* Change outer ip address, fix outer ipv4 or icmpv6 checksum. */ 2352 PF_ACPY(oa, na, af); 2353 switch (af) { 2354 #ifdef INET 2355 case AF_INET: 2356 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc, 2357 ooa.addr16[0], oa->addr16[0], 0), 2358 ooa.addr16[1], oa->addr16[1], 0); 2359 break; 2360 #endif /* INET */ 2361 #ifdef INET6 2362 case AF_INET6: 2363 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2364 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2365 pf_cksum_fixup(pf_cksum_fixup(*ic, 2366 ooa.addr16[0], oa->addr16[0], u), 2367 ooa.addr16[1], oa->addr16[1], u), 2368 ooa.addr16[2], oa->addr16[2], u), 2369 ooa.addr16[3], oa->addr16[3], u), 2370 ooa.addr16[4], oa->addr16[4], u), 2371 ooa.addr16[5], oa->addr16[5], u), 2372 ooa.addr16[6], oa->addr16[6], u), 2373 ooa.addr16[7], oa->addr16[7], u); 2374 break; 2375 #endif /* INET6 */ 2376 } 2377 } 2378 2379 void 2380 pf_send_reset(int off, struct tcphdr *th, struct pf_pdesc *pd, int af) 2381 { 2382 struct mbuf *m; 2383 struct m_tag *mtag; 2384 int len; 2385 #ifdef INET 2386 struct ip *h2; 2387 #endif /* INET */ 2388 #ifdef INET6 2389 struct ip6_hdr *h2_6; 2390 #endif /* INET6 */ 2391 struct tcphdr *th2; 2392 2393 switch (af) { 2394 #ifdef INET 2395 case AF_INET: 2396 len = sizeof(struct ip) + sizeof(struct tcphdr); 2397 break; 2398 #endif /* INET */ 2399 #ifdef INET6 2400 case AF_INET6: 2401 len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 2402 break; 2403 #endif /* INET6 */ 2404 } 2405 2406 /* don't reply to RST packets */ 2407 if (th->th_flags & TH_RST) 2408 return; 2409 2410 /* create outgoing mbuf */ 2411 mtag = m_tag_get(PACKET_TAG_PF_GENERATED, 0, M_NOWAIT); 2412 if (mtag == NULL) 2413 return; 2414 m = m_gethdr(M_DONTWAIT, MT_HEADER); 2415 if (m == NULL) { 2416 m_tag_free(mtag); 2417 return; 2418 } 2419 m_tag_prepend(m, mtag); 2420 m->m_data += max_linkhdr; 2421 m->m_pkthdr.len = m->m_len = len; 2422 m->m_pkthdr.rcvif = NULL; 2423 bzero(m->m_data, len); 2424 switch (af) { 2425 #ifdef INET 2426 case AF_INET: 2427 h2 = mtod(m, struct ip *); 2428 2429 /* IP header fields included in the TCP checksum */ 2430 h2->ip_p = IPPROTO_TCP; 2431 h2->ip_len = htons(sizeof(*th2)); 2432 h2->ip_src.s_addr = pd->dst->v4.s_addr; 2433 h2->ip_dst.s_addr = pd->src->v4.s_addr; 2434 2435 th2 = (struct tcphdr *)((caddr_t)h2 + sizeof(struct ip)); 2436 break; 2437 #endif /* INET */ 2438 #ifdef INET6 2439 case AF_INET6: 2440 h2_6 = mtod(m, struct ip6_hdr *); 2441 2442 /* IP header fields included in the TCP checksum */ 2443 h2_6->ip6_nxt = IPPROTO_TCP; 2444 h2_6->ip6_plen = htons(sizeof(*th2)); 2445 memcpy(&h2_6->ip6_src, pd->dst, sizeof(struct 
in6_addr)); 2446 memcpy(&h2_6->ip6_dst, pd->src, sizeof(struct in6_addr)); 2447 2448 th2 = (struct tcphdr *)((caddr_t)h2_6 + sizeof(struct ip6_hdr)); 2449 break; 2450 #endif /* INET6 */ 2451 } 2452 2453 /* TCP header */ 2454 th2->th_sport = th->th_dport; 2455 th2->th_dport = th->th_sport; 2456 if (th->th_flags & TH_ACK) { 2457 th2->th_seq = th->th_ack; 2458 th2->th_flags = TH_RST; 2459 } else { 2460 int tlen = pd->p_len; 2461 if (th->th_flags & TH_SYN) 2462 tlen++; 2463 if (th->th_flags & TH_FIN) 2464 tlen++; 2465 th2->th_ack = htonl(ntohl(th->th_seq) + tlen); 2466 th2->th_flags = TH_RST | TH_ACK; 2467 } 2468 th2->th_off = sizeof(*th2) >> 2; 2469 2470 switch (af) { 2471 #ifdef INET 2472 case AF_INET: 2473 /* TCP checksum */ 2474 th2->th_sum = in_cksum(m, len); 2475 2476 /* Finish the IP header */ 2477 h2->ip_v = 4; 2478 h2->ip_hl = sizeof(*h2) >> 2; 2479 h2->ip_ttl = 128; 2480 h2->ip_sum = 0; 2481 h2->ip_len = len; 2482 h2->ip_off = 0; 2483 ip_output(m, NULL, NULL, 0, NULL, NULL); 2484 break; 2485 #endif /* INET */ 2486 #ifdef INET6 2487 case AF_INET6: 2488 /* TCP checksum */ 2489 th2->th_sum = in6_cksum(m, IPPROTO_TCP, 2490 sizeof(struct ip6_hdr), sizeof(*th)); 2491 2492 h2_6->ip6_vfc |= IPV6_VERSION; 2493 h2_6->ip6_hlim = 128; 2494 2495 ip6_output(m, NULL, NULL, 0, NULL, NULL); 2496 #endif /* INET6 */ 2497 } 2498 } 2499 2500 void 2501 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, int af) 2502 { 2503 struct m_tag *mtag; 2504 struct mbuf *m0; 2505 2506 mtag = m_tag_get(PACKET_TAG_PF_GENERATED, 0, M_NOWAIT); 2507 if (mtag == NULL) 2508 return; 2509 m0 = m_copy(m, 0, M_COPYALL); 2510 if (m0 == NULL) { 2511 m_tag_free(mtag); 2512 return; 2513 } 2514 m_tag_prepend(m0, mtag); 2515 switch (af) { 2516 #ifdef INET 2517 case AF_INET: 2518 icmp_error(m0, type, code, 0, 0); 2519 break; 2520 #endif /* INET */ 2521 #ifdef INET6 2522 case AF_INET6: 2523 icmp6_error(m0, type, code, 0); 2524 break; 2525 #endif /* INET6 */ 2526 } 2527 } 2528 2529 /* 2530 * Return 1 if the addresses a and b match (with mask m), otherwise return 0. 2531 * If n is 0, they match if they are equal. If n is != 0, they match if they 2532 * are different. 
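 * Example (illustrative values): with m = 255.255.255.0, a = 10.1.2.3 and
 * b = 10.1.2.77 are equal under the mask, so
 *	pf_match_addr(0, &a, &m, &b, AF_INET) == 1
 *	pf_match_addr(1, &a, &m, &b, AF_INET) == 0
 * (n is the rule's address negation flag, e.g. r->src.not).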
2533 */ 2534 int 2535 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, 2536 struct pf_addr *b, int af) 2537 { 2538 int match = 0; 2539 switch (af) { 2540 #ifdef INET 2541 case AF_INET: 2542 if ((a->addr32[0] & m->addr32[0]) == 2543 (b->addr32[0] & m->addr32[0])) 2544 match++; 2545 break; 2546 #endif /* INET */ 2547 #ifdef INET6 2548 case AF_INET6: 2549 if (((a->addr32[0] & m->addr32[0]) == 2550 (b->addr32[0] & m->addr32[0])) && 2551 ((a->addr32[1] & m->addr32[1]) == 2552 (b->addr32[1] & m->addr32[1])) && 2553 ((a->addr32[2] & m->addr32[2]) == 2554 (b->addr32[2] & m->addr32[2])) && 2555 ((a->addr32[3] & m->addr32[3]) == 2556 (b->addr32[3] & m->addr32[3]))) 2557 match++; 2558 break; 2559 #endif /* INET6 */ 2560 } 2561 if (match) { 2562 if (n) 2563 return (0); 2564 else 2565 return (1); 2566 } else { 2567 if (n) 2568 return (1); 2569 else 2570 return (0); 2571 } 2572 } 2573 2574 int 2575 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) 2576 { 2577 NTOHS(a1); 2578 NTOHS(a2); 2579 NTOHS(p); 2580 switch (op) { 2581 case PF_OP_IRG: 2582 return (p > a1) && (p < a2); 2583 case PF_OP_XRG: 2584 return (p < a1) || (p > a2); 2585 case PF_OP_EQ: 2586 return (p == a1); 2587 case PF_OP_NE: 2588 return (p != a1); 2589 case PF_OP_LT: 2590 return (p < a1); 2591 case PF_OP_LE: 2592 return (p <= a1); 2593 case PF_OP_GT: 2594 return (p > a1); 2595 case PF_OP_GE: 2596 return (p >= a1); 2597 } 2598 return (0); /* never reached */ 2599 } 2600 2601 int 2602 pf_chk_sport(struct pf_port_list *plist, u_int16_t port) 2603 { 2604 struct pf_port_node *pnode; 2605 2606 LIST_FOREACH(pnode, plist, next) { 2607 if (pnode->port == port) 2608 return (1); 2609 } 2610 2611 return (0); 2612 } 2613 2614 int 2615 pf_add_sport(struct pf_port_list *plist, u_int16_t port) 2616 { 2617 struct pf_port_node *pnode; 2618 2619 pnode = pool_get(&pf_sport_pl, M_NOWAIT); 2620 if (pnode == NULL) 2621 return (ENOMEM); 2622 2623 pnode->port = port; 2624 LIST_INSERT_HEAD(plist, pnode, next); 2625 2626 return (0); 2627 } 2628 2629 void 2630 pf_put_sport(u_int8_t proto, u_int16_t port) 2631 { 2632 struct pf_port_list *plist; 2633 struct pf_port_node *pnode; 2634 2635 if (proto == IPPROTO_TCP) 2636 plist = &pf_tcp_ports; 2637 else if (proto == IPPROTO_UDP) 2638 plist = &pf_udp_ports; 2639 else 2640 return; 2641 2642 LIST_FOREACH(pnode, plist, next) { 2643 if (pnode->port == port) { 2644 LIST_REMOVE(pnode, next); 2645 pool_put(&pf_sport_pl, pnode); 2646 break; 2647 } 2648 } 2649 } 2650 2651 int 2652 pf_get_sport(u_int8_t proto, u_int16_t low, u_int16_t high, u_int16_t *port) 2653 { 2654 struct pf_port_list *plist; 2655 int step; 2656 u_int16_t cut; 2657 2658 if (proto == IPPROTO_TCP) 2659 plist = &pf_tcp_ports; 2660 else if (proto == IPPROTO_UDP) 2661 plist = &pf_udp_ports; 2662 else 2663 return (EINVAL); 2664 2665 /* port search; start random, step; similar 2 portloop in in_pcbbind */ 2666 if (low == high) { 2667 *port = low; 2668 if (!pf_chk_sport(plist, *port)) 2669 goto found; 2670 return (1); 2671 } else if (low < high) { 2672 step = 1; 2673 cut = arc4random() % (high - low) + low; 2674 } else { 2675 step = -1; 2676 cut = arc4random() % (low - high) + high; 2677 } 2678 2679 *port = cut - step; 2680 do { 2681 *port += step; 2682 if (!pf_chk_sport(plist, *port)) 2683 goto found; 2684 } while (*port != low && *port != high); 2685 2686 step = -step; 2687 *port = cut; 2688 do { 2689 *port += step; 2690 if (!pf_chk_sport(plist, *port)) 2691 goto found; 2692 } while (*port != low && *port != high); 2693 2694 return (1); 
/* none available */ 2695 2696 found: 2697 return (pf_add_sport(plist, *port)); 2698 } 2699 2700 struct pf_nat * 2701 pf_get_nat(struct ifnet *ifp, u_int8_t proto, struct pf_addr *saddr, 2702 struct pf_addr *daddr, int af) 2703 { 2704 struct pf_nat *n, *nm = NULL; 2705 2706 n = TAILQ_FIRST(pf_nats_active); 2707 while (n && nm == NULL) { 2708 if (((n->ifp == NULL) || (n->ifp == ifp && !n->ifnot) || 2709 (n->ifp != ifp && n->ifnot)) && 2710 (!n->proto || n->proto == proto) && 2711 (!n->af || n->af == af) && 2712 PF_MATCHA(n->snot, &n->saddr, &n->smask, saddr, af) && 2713 PF_MATCHA(n->dnot, &n->daddr, &n->dmask, daddr, af)) 2714 nm = n; 2715 else 2716 n = TAILQ_NEXT(n, entries); 2717 } 2718 return (nm); 2719 } 2720 2721 struct pf_binat * 2722 pf_get_binat(int direction, struct ifnet *ifp, u_int8_t proto, 2723 struct pf_addr *saddr, struct pf_addr *daddr, int af) 2724 { 2725 struct pf_binat *b, *bm = NULL; 2726 struct pf_addr fullmask; 2727 2728 memset(&fullmask, 0xff, sizeof(fullmask)); 2729 2730 b = TAILQ_FIRST(pf_binats_active); 2731 while (b && bm == NULL) { 2732 if (direction == PF_OUT && b->ifp == ifp && 2733 (!b->proto || b->proto == proto) && 2734 (!b->af || b->af == af) && 2735 PF_MATCHA(0, &b->saddr, &fullmask, saddr, af) && 2736 PF_MATCHA(b->dnot, &b->daddr, &b->dmask, daddr, af)) 2737 bm = b; 2738 else if (direction == PF_IN && b->ifp == ifp && 2739 (!b->proto || b->proto == proto) && 2740 (!b->af || b->af == af) && 2741 PF_MATCHA(0, &b->raddr, &fullmask, saddr, af) && 2742 PF_MATCHA(b->dnot, &b->daddr, &b->dmask, daddr, af)) 2743 bm = b; 2744 else 2745 b = TAILQ_NEXT(b, entries); 2746 } 2747 return (bm); 2748 } 2749 2750 struct pf_rdr * 2751 pf_get_rdr(struct ifnet *ifp, u_int8_t proto, struct pf_addr *saddr, 2752 struct pf_addr *daddr, u_int16_t dport, int af) 2753 { 2754 struct pf_rdr *r, *rm = NULL; 2755 2756 r = TAILQ_FIRST(pf_rdrs_active); 2757 while (r && rm == NULL) { 2758 if (((r->ifp == NULL) || (r->ifp == ifp && !r->ifnot) || 2759 (r->ifp != ifp && r->ifnot)) && 2760 (!r->proto || r->proto == proto) && 2761 (!r->af || r->af == af) && 2762 PF_MATCHA(r->snot, &r->saddr, &r->smask, saddr, af) && 2763 PF_MATCHA(r->dnot, &r->daddr, &r->dmask, daddr, af) && 2764 ((!r->dport2 && (!r->dport || dport == r->dport)) || 2765 (r->dport2 && (ntohs(dport) >= ntohs(r->dport)) && 2766 ntohs(dport) <= ntohs(r->dport2)))) 2767 rm = r; 2768 else 2769 r = TAILQ_NEXT(r, entries); 2770 } 2771 return (rm); 2772 } 2773 2774 u_int16_t 2775 pf_map_port_range(struct pf_rdr *rdr, u_int16_t port) 2776 { 2777 u_int32_t nport; 2778 2779 nport = ntohs(rdr->rport) - ntohs(rdr->dport) + ntohs(port); 2780 /* wrap around if necessary */ 2781 if (nport > 65535) 2782 nport -= 65535; 2783 return htons((u_int16_t)nport); 2784 } 2785 2786 2787 int 2788 pf_test_tcp(struct pf_rule **rm, int direction, struct ifnet *ifp, 2789 struct mbuf *m, int ipoff, int off, void *h, struct pf_pdesc *pd) 2790 { 2791 struct pf_nat *nat = NULL; 2792 struct pf_binat *binat = NULL; 2793 struct pf_rdr *rdr = NULL; 2794 struct pf_addr *saddr = pd->src, *daddr = pd->dst, baddr; 2795 struct tcphdr *th = pd->hdr.tcp; 2796 struct pf_rule *r; 2797 u_int16_t bport, nport = 0, af = pd->af; 2798 u_short reason; 2799 int rewrite = 0, error; 2800 2801 *rm = NULL; 2802 2803 if (direction == PF_OUT) { 2804 /* check outgoing packet for BINAT */ 2805 if ((binat = pf_get_binat(PF_OUT, ifp, IPPROTO_TCP, 2806 saddr, daddr, af)) != NULL) { 2807 PF_ACPY(&baddr, saddr, af); 2808 bport = th->th_sport; 2809 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 
2810 &th->th_sum, &binat->raddr, th->th_sport, 0, af); 2811 rewrite++; 2812 } 2813 /* check outgoing packet for NAT */ 2814 else if ((nat = pf_get_nat(ifp, IPPROTO_TCP, 2815 saddr, daddr, af)) != NULL) { 2816 bport = th->th_sport; 2817 error = pf_get_sport(IPPROTO_TCP, 50001, 2818 65535, &nport); 2819 if (error) 2820 return (PF_DROP); 2821 PF_ACPY(&baddr, saddr, af); 2822 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 2823 &th->th_sum, &nat->raddr, htons(nport), 0, af); 2824 rewrite++; 2825 } 2826 } else { 2827 /* check incoming packet for RDR */ 2828 if ((rdr = pf_get_rdr(ifp, IPPROTO_TCP, saddr, daddr, 2829 th->th_dport, af)) != NULL) { 2830 bport = th->th_dport; 2831 if (rdr->opts & PF_RPORT_RANGE) 2832 nport = pf_map_port_range(rdr, th->th_dport); 2833 else if (rdr->rport) 2834 nport = rdr->rport; 2835 else 2836 nport = bport; 2837 PF_ACPY(&baddr, daddr, af); 2838 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 2839 &th->th_sum, &rdr->raddr, nport, 0, af); 2840 rewrite++; 2841 } 2842 /* check incoming packet for BINAT */ 2843 else if ((binat = pf_get_binat(PF_IN, ifp, IPPROTO_TCP, 2844 daddr, saddr, af)) != NULL) { 2845 PF_ACPY(&baddr, daddr, af); 2846 bport = th->th_dport; 2847 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 2848 &th->th_sum, &binat->saddr, th->th_dport, 0, af); 2849 rewrite++; 2850 } 2851 } 2852 2853 r = TAILQ_FIRST(pf_rules_active); 2854 while (r != NULL) { 2855 if (r->action == PF_SCRUB) { 2856 r = TAILQ_NEXT(r, entries); 2857 continue; 2858 } 2859 r->evaluations++; 2860 if (r->ifp != NULL && r->ifp != ifp) 2861 r = r->skip[PF_SKIP_IFP]; 2862 else if (r->af && r->af != af) 2863 r = r->skip[PF_SKIP_AF]; 2864 else if (r->proto && r->proto != IPPROTO_TCP) 2865 r = r->skip[PF_SKIP_PROTO]; 2866 else if (!PF_AZERO(&r->src.mask, af) && !PF_MATCHA(r->src.not, 2867 &r->src.addr, &r->src.mask, saddr, af)) 2868 r = r->skip[PF_SKIP_SRC_ADDR]; 2869 else if (r->src.port_op && !pf_match_port(r->src.port_op, 2870 r->src.port[0], r->src.port[1], th->th_sport)) 2871 r = r->skip[PF_SKIP_SRC_PORT]; 2872 else if (!PF_AZERO(&r->dst.mask, af) && !PF_MATCHA(r->dst.not, 2873 &r->dst.addr, &r->dst.mask, daddr, af)) 2874 r = r->skip[PF_SKIP_DST_ADDR]; 2875 else if (r->dst.port_op && !pf_match_port(r->dst.port_op, 2876 r->dst.port[0], r->dst.port[1], th->th_dport)) 2877 r = r->skip[PF_SKIP_DST_PORT]; 2878 else if (r->direction != direction) 2879 r = TAILQ_NEXT(r, entries); 2880 else if ((r->flagset & th->th_flags) != r->flags) 2881 r = TAILQ_NEXT(r, entries); 2882 else { 2883 *rm = r; 2884 if ((*rm)->quick) 2885 break; 2886 r = TAILQ_NEXT(r, entries); 2887 } 2888 } 2889 2890 if (*rm != NULL) { 2891 (*rm)->packets++; 2892 (*rm)->bytes += pd->tot_len; 2893 REASON_SET(&reason, PFRES_MATCH); 2894 2895 /* XXX will log packet before rewrite */ 2896 if ((*rm)->log) 2897 PFLOG_PACKET(ifp, h, m, af, direction, reason, *rm); 2898 2899 if (((*rm)->action == PF_DROP) && 2900 (((*rm)->rule_flag & PFRULE_RETURNRST) || 2901 (*rm)->return_icmp)) { 2902 /* undo NAT/RST changes, if they have taken place */ 2903 if (nat != NULL || 2904 (binat != NULL && direction == PF_OUT)) { 2905 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 2906 &th->th_sum, &baddr, bport, 0, af); 2907 rewrite++; 2908 } else if (rdr != NULL || 2909 (binat != NULL && direction == PF_IN)) { 2910 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 2911 &th->th_sum, &baddr, bport, 0, af); 2912 rewrite++; 2913 } 2914 if ((*rm)->rule_flag & PFRULE_RETURNRST) 2915 pf_send_reset(off, th, pd, af); 2916 else 2917 pf_send_icmp(m, (*rm)->return_icmp >> 8, 
2918 (*rm)->return_icmp & 255, af); 2919 } 2920 2921 if ((*rm)->action == PF_DROP) { 2922 if (nport && nat != NULL) 2923 pf_put_sport(IPPROTO_TCP, nport); 2924 return (PF_DROP); 2925 } 2926 } 2927 2928 if (((*rm != NULL) && (*rm)->keep_state) || nat != NULL || 2929 binat != NULL || rdr != NULL) { 2930 /* create new state */ 2931 u_int16_t len; 2932 struct pf_state *s; 2933 2934 len = pd->tot_len - off - (th->th_off << 2); 2935 s = pool_get(&pf_state_pl, PR_NOWAIT); 2936 if (s == NULL) { 2937 if (nport && nat != NULL) 2938 pf_put_sport(IPPROTO_TCP, nport); 2939 return (PF_DROP); 2940 } 2941 2942 s->rule = *rm; 2943 s->allow_opts = *rm && (*rm)->allow_opts; 2944 s->log = *rm && ((*rm)->log & 2); 2945 s->proto = IPPROTO_TCP; 2946 s->direction = direction; 2947 s->af = af; 2948 if (direction == PF_OUT) { 2949 PF_ACPY(&s->gwy.addr, saddr, af); 2950 s->gwy.port = th->th_sport; /* sport */ 2951 PF_ACPY(&s->ext.addr, daddr, af); 2952 s->ext.port = th->th_dport; 2953 if (nat != NULL || binat != NULL) { 2954 PF_ACPY(&s->lan.addr, &baddr, af); 2955 s->lan.addr = baddr; 2956 s->lan.port = bport; 2957 } else { 2958 PF_ACPY(&s->lan.addr, &s->gwy.addr, af); 2959 s->lan.port = s->gwy.port; 2960 } 2961 } else { 2962 PF_ACPY(&s->lan.addr, daddr, af); 2963 s->lan.port = th->th_dport; 2964 PF_ACPY(&s->ext.addr, saddr, af); 2965 s->ext.port = th->th_sport; 2966 if (binat != NULL ||rdr != NULL) { 2967 PF_ACPY(&s->gwy.addr, &baddr, af); 2968 s->gwy.port = bport; 2969 } else { 2970 PF_ACPY(&s->gwy.addr, &s->lan.addr, af); 2971 s->gwy.port = s->lan.port; 2972 } 2973 } 2974 2975 s->src.seqlo = ntohl(th->th_seq); 2976 s->src.seqhi = s->src.seqlo + len + 1; 2977 if (th->th_flags == TH_SYN && *rm != NULL 2978 && (*rm)->keep_state == PF_STATE_MODULATE) { 2979 /* Generate sequence number modulator */ 2980 while ((s->src.seqdiff = arc4random()) == 0) 2981 ; 2982 pf_change_a(&th->th_seq, &th->th_sum, 2983 htonl(s->src.seqlo + s->src.seqdiff), 0); 2984 rewrite = 1; 2985 } else 2986 s->src.seqdiff = 0; 2987 if (th->th_flags & TH_SYN) 2988 s->src.seqhi++; 2989 if (th->th_flags & TH_FIN) 2990 s->src.seqhi++; 2991 s->src.max_win = MAX(ntohs(th->th_win), 1); 2992 s->dst.seqlo = 0; /* Haven't seen these yet */ 2993 s->dst.seqhi = 1; 2994 s->dst.max_win = 1; 2995 s->dst.seqdiff = 0; /* Defer random generation */ 2996 s->src.state = TCPS_SYN_SENT; 2997 s->dst.state = TCPS_CLOSED; 2998 s->creation = pftv.tv_sec; 2999 s->expire = pftv.tv_sec + pftm_tcp_first_packet; 3000 s->packets = 1; 3001 s->bytes = pd->tot_len; 3002 pf_insert_state(s); 3003 } 3004 3005 /* copy back packet headers if we performed NAT operations */ 3006 if (rewrite) 3007 m_copyback(m, off, sizeof(*th), (caddr_t)th); 3008 3009 return (PF_PASS); 3010 } 3011 3012 int 3013 pf_test_udp(struct pf_rule **rm, int direction, struct ifnet *ifp, 3014 struct mbuf *m, int ipoff, int off, void *h, struct pf_pdesc *pd) 3015 { 3016 struct pf_nat *nat = NULL; 3017 struct pf_binat *binat = NULL; 3018 struct pf_rdr *rdr = NULL; 3019 struct pf_addr *saddr = pd->src, *daddr = pd->dst, baddr; 3020 struct udphdr *uh = pd->hdr.udp; 3021 u_int16_t bport, nport = 0, af = pd->af; 3022 struct pf_rule *r; 3023 u_short reason; 3024 int rewrite = 0, error; 3025 3026 *rm = NULL; 3027 3028 if (direction == PF_OUT) { 3029 /* check outgoing packet for BINAT */ 3030 if ((binat = pf_get_binat(PF_OUT, ifp, IPPROTO_UDP, 3031 saddr, daddr, af)) != NULL) { 3032 PF_ACPY(&baddr, saddr, af); 3033 bport = uh->uh_sport; 3034 pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum, 3035 &uh->uh_sum, &binat->raddr, 
			    uh->uh_sport, 1, af);
			rewrite++;
		}
		/* check outgoing packet for NAT */
		else if ((nat = pf_get_nat(ifp, IPPROTO_UDP,
		    saddr, daddr, af)) != NULL) {
			bport = uh->uh_sport;
			error = pf_get_sport(IPPROTO_UDP, 50001,
			    65535, &nport);
			if (error)
				return (PF_DROP);
			PF_ACPY(&baddr, saddr, af);
			pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum,
			    &uh->uh_sum, &nat->raddr, htons(nport), 1, af);
			rewrite++;
		}
	} else {
		/* check incoming packet for RDR */
		if ((rdr = pf_get_rdr(ifp, IPPROTO_UDP, saddr, daddr,
		    uh->uh_dport, af)) != NULL) {
			bport = uh->uh_dport;
			if (rdr->opts & PF_RPORT_RANGE)
				nport = pf_map_port_range(rdr, uh->uh_dport);
			else if (rdr->rport)
				nport = rdr->rport;
			else
				nport = bport;

			PF_ACPY(&baddr, daddr, af);
			pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
			    &uh->uh_sum, &rdr->raddr, nport, 1, af);
			rewrite++;
		}
		/* check incoming packet for BINAT */
		else if ((binat = pf_get_binat(PF_IN, ifp, IPPROTO_UDP,
		    daddr, saddr, af)) != NULL) {
			PF_ACPY(&baddr, daddr, af);
			bport = uh->uh_dport;
			pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
			    &uh->uh_sum, &binat->saddr, uh->uh_dport, 1, af);
			rewrite++;
		}
	}

	r = TAILQ_FIRST(pf_rules_active);
	while (r != NULL) {
		if (r->action == PF_SCRUB) {
			r = TAILQ_NEXT(r, entries);
			continue;
		}
		r->evaluations++;

		if (r->ifp != NULL && r->ifp != ifp)
			r = r->skip[PF_SKIP_IFP];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != IPPROTO_UDP)
			r = r->skip[PF_SKIP_PROTO];
		else if (!PF_AZERO(&r->src.mask, af) &&
		    !PF_MATCHA(r->src.not, &r->src.addr, &r->src.mask,
		    saddr, af))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], uh->uh_sport))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (!PF_AZERO(&r->dst.mask, af) &&
		    !PF_MATCHA(r->dst.not, &r->dst.addr, &r->dst.mask,
		    daddr, af))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], uh->uh_dport))
			r = r->skip[PF_SKIP_DST_PORT];
		else if (r->direction != direction)
			r = TAILQ_NEXT(r, entries);
		else {
			*rm = r;
			if ((*rm)->quick)
				break;
			r = TAILQ_NEXT(r, entries);
		}
	}

	if (*rm != NULL) {
		(*rm)->packets++;
		(*rm)->bytes += pd->tot_len;
		REASON_SET(&reason, PFRES_MATCH);

		/* XXX will log packet before rewrite */
		if ((*rm)->log)
			PFLOG_PACKET(ifp, h, m, af, direction, reason, *rm);

		if (((*rm)->action == PF_DROP) && (*rm)->return_icmp) {
			/* undo NAT/RST changes, if they have taken place */
			if (nat != NULL ||
			    (binat != NULL && direction == PF_OUT)) {
				pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum,
				    &uh->uh_sum, &baddr, bport, 1, af);
				rewrite++;
			} else if (rdr != NULL ||
			    (binat != NULL && direction == PF_IN)) {
				pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
				    &uh->uh_sum, &baddr, bport, 1, af);
				rewrite++;
			}
			pf_send_icmp(m, (*rm)->return_icmp >> 8,
			    (*rm)->return_icmp & 255, af);
		}

		if ((*rm)->action == PF_DROP) {
			if (nport && nat != NULL)
				pf_put_sport(IPPROTO_UDP, nport);
			return (PF_DROP);
		}
	}

	if ((*rm != NULL && (*rm)->keep_state) || nat !=
NULL || 3151 binat != NULL || rdr != NULL) { 3152 /* create new state */ 3153 struct pf_state *s; 3154 3155 s = pool_get(&pf_state_pl, PR_NOWAIT); 3156 if (s == NULL) { 3157 if (nport && nat != NULL) 3158 pf_put_sport(IPPROTO_UDP, nport); 3159 return (PF_DROP); 3160 } 3161 3162 s->rule = *rm; 3163 s->allow_opts = *rm && (*rm)->allow_opts; 3164 s->log = *rm && ((*rm)->log & 2); 3165 s->proto = IPPROTO_UDP; 3166 s->direction = direction; 3167 s->af = af; 3168 if (direction == PF_OUT) { 3169 PF_ACPY(&s->gwy.addr, saddr, af); 3170 s->gwy.port = uh->uh_sport; 3171 PF_ACPY(&s->ext.addr, daddr, af); 3172 s->ext.port = uh->uh_dport; 3173 if (nat != NULL || binat != NULL) { 3174 PF_ACPY(&s->lan.addr, &baddr, af); 3175 s->lan.port = bport; 3176 } else { 3177 PF_ACPY(&s->lan.addr, &s->gwy.addr, af); 3178 s->lan.port = s->gwy.port; 3179 } 3180 } else { 3181 PF_ACPY(&s->lan.addr, daddr, af); 3182 s->lan.port = uh->uh_dport; 3183 PF_ACPY(&s->ext.addr, saddr, af); 3184 s->ext.port = uh->uh_sport; 3185 if (binat != NULL || rdr != NULL) { 3186 PF_ACPY(&s->gwy.addr, &baddr, af); 3187 s->gwy.port = bport; 3188 } else { 3189 PF_ACPY(&s->gwy.addr, &s->lan.addr, af); 3190 s->gwy.port = s->lan.port; 3191 } 3192 } 3193 s->src.seqlo = 0; 3194 s->src.seqhi = 0; 3195 s->src.seqdiff = 0; 3196 s->src.max_win = 0; 3197 s->src.state = 1; 3198 s->dst.seqlo = 0; 3199 s->dst.seqhi = 0; 3200 s->dst.seqdiff = 0; 3201 s->dst.max_win = 0; 3202 s->dst.state = 0; 3203 s->creation = pftv.tv_sec; 3204 s->expire = pftv.tv_sec + pftm_udp_first_packet; 3205 s->packets = 1; 3206 s->bytes = pd->tot_len; 3207 pf_insert_state(s); 3208 } 3209 3210 /* copy back packet headers if we performed NAT operations */ 3211 if (rewrite) 3212 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 3213 3214 return (PF_PASS); 3215 } 3216 3217 int 3218 pf_test_icmp(struct pf_rule **rm, int direction, struct ifnet *ifp, 3219 struct mbuf *m, int ipoff, int off, void *h, struct pf_pdesc *pd) 3220 { 3221 struct pf_nat *nat = NULL; 3222 struct pf_binat *binat = NULL; 3223 struct pf_rdr *rdr = NULL; 3224 struct pf_addr *saddr = pd->src, *daddr = pd->dst, baddr; 3225 struct pf_rule *r; 3226 u_short reason; 3227 u_int16_t icmpid, af = pd->af; 3228 u_int8_t icmptype, icmpcode; 3229 #ifdef INET6 3230 int rewrite = 0; 3231 #endif /* INET6 */ 3232 3233 *rm = NULL; 3234 3235 switch (pd->proto) { 3236 #ifdef INET 3237 case IPPROTO_ICMP: 3238 icmptype = pd->hdr.icmp->icmp_type; 3239 icmpcode = pd->hdr.icmp->icmp_code; 3240 icmpid = pd->hdr.icmp->icmp_id; 3241 break; 3242 #endif /* INET */ 3243 #ifdef INET6 3244 case IPPROTO_ICMPV6: 3245 icmptype = pd->hdr.icmp6->icmp6_type; 3246 icmpcode = pd->hdr.icmp6->icmp6_code; 3247 icmpid = pd->hdr.icmp6->icmp6_id; 3248 break; 3249 #endif /* INET6 */ 3250 } 3251 3252 if (direction == PF_OUT) { 3253 /* check outgoing packet for BINAT */ 3254 if ((binat = pf_get_binat(PF_OUT, ifp, IPPROTO_ICMP, 3255 saddr, daddr, af)) != NULL) { 3256 PF_ACPY(&baddr, saddr, af); 3257 switch (af) { 3258 #ifdef INET 3259 case AF_INET: 3260 pf_change_a(&saddr->v4.s_addr, pd->ip_sum, 3261 binat->raddr.v4.s_addr, 0); 3262 break; 3263 #endif /* INET */ 3264 #ifdef INET6 3265 case AF_INET6: 3266 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum, 3267 &binat->raddr, 0); 3268 rewrite++; 3269 break; 3270 #endif /* INET6 */ 3271 } 3272 } 3273 /* check outgoing packet for NAT */ 3274 else if ((nat = pf_get_nat(ifp, pd->proto, 3275 saddr, daddr, af)) != NULL) { 3276 PF_ACPY(&baddr, saddr, af); 3277 switch (af) { 3278 #ifdef INET 3279 case AF_INET: 3280 
pf_change_a(&saddr->v4.s_addr, 3281 pd->ip_sum, nat->raddr.v4.s_addr, 0); 3282 break; 3283 #endif /* INET */ 3284 #ifdef INET6 3285 case AF_INET6: 3286 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum, 3287 &nat->raddr, 0); 3288 rewrite++; 3289 break; 3290 #endif /* INET6 */ 3291 } 3292 } 3293 } else { 3294 /* check incoming packet for RDR */ 3295 if ((rdr = pf_get_rdr(ifp, pd->proto, 3296 saddr, daddr, 0, af)) != NULL) { 3297 PF_ACPY(&baddr, daddr, af); 3298 switch (af) { 3299 #ifdef INET 3300 case AF_INET: 3301 pf_change_a(&daddr->v4.s_addr, 3302 pd->ip_sum, rdr->raddr.v4.s_addr, 0); 3303 break; 3304 #endif /* INET */ 3305 #ifdef INET6 3306 case AF_INET6: 3307 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum, 3308 &rdr->raddr, 0); 3309 rewrite++; 3310 break; 3311 #endif /* INET6 */ 3312 } 3313 } 3314 /* check incoming packet for BINAT */ 3315 else if ((binat = pf_get_binat(PF_IN, ifp, IPPROTO_ICMP, 3316 daddr, saddr, af)) != NULL) { 3317 PF_ACPY(&baddr, daddr, af); 3318 switch (af) { 3319 #ifdef INET 3320 case AF_INET: 3321 pf_change_a(&daddr->v4.s_addr, 3322 pd->ip_sum, binat->saddr.v4.s_addr, 0); 3323 break; 3324 #endif /* INET */ 3325 #ifdef INET6 3326 case AF_INET6: 3327 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum, 3328 &binat->saddr, 0); 3329 rewrite++; 3330 break; 3331 #endif /* INET6 */ 3332 } 3333 } 3334 } 3335 3336 r = TAILQ_FIRST(pf_rules_active); 3337 while (r != NULL) { 3338 if (r->action == PF_SCRUB) { 3339 r = TAILQ_NEXT(r, entries); 3340 continue; 3341 } 3342 r->evaluations++; 3343 if (r->ifp != NULL && r->ifp != ifp) 3344 r = r->skip[PF_SKIP_IFP]; 3345 else if (r->af && r->af != af) 3346 r = r->skip[PF_SKIP_AF]; 3347 else if (r->proto && r->proto != pd->proto) 3348 r = r->skip[PF_SKIP_PROTO]; 3349 else if (!PF_AZERO(&r->src.mask, af) && !PF_MATCHA(r->src.not, 3350 &r->src.addr, &r->src.mask, saddr, af)) 3351 r = r->skip[PF_SKIP_SRC_ADDR]; 3352 else if (!PF_AZERO(&r->dst.mask, af) && !PF_MATCHA(r->dst.not, 3353 &r->dst.addr, &r->dst.mask, daddr, af)) 3354 r = r->skip[PF_SKIP_DST_ADDR]; 3355 else if (r->direction != direction) 3356 r = TAILQ_NEXT(r, entries); 3357 else if (r->ifp != NULL && r->ifp != ifp) 3358 r = TAILQ_NEXT(r, entries); 3359 else if (r->type && r->type != icmptype + 1) 3360 r = TAILQ_NEXT(r, entries); 3361 else if (r->code && r->code != icmpcode + 1) 3362 r = TAILQ_NEXT(r, entries); 3363 else { 3364 *rm = r; 3365 if ((*rm)->quick) 3366 break; 3367 r = TAILQ_NEXT(r, entries); 3368 } 3369 } 3370 3371 if (*rm != NULL) { 3372 (*rm)->packets++; 3373 (*rm)->bytes += pd->tot_len; 3374 REASON_SET(&reason, PFRES_MATCH); 3375 3376 /* XXX will log packet before rewrite */ 3377 if ((*rm)->log) 3378 PFLOG_PACKET(ifp, h, m, af, direction, reason, *rm); 3379 3380 if ((*rm)->action != PF_PASS) 3381 return (PF_DROP); 3382 } 3383 3384 if ((*rm != NULL && (*rm)->keep_state) || nat != NULL || 3385 rdr != NULL || binat != NULL) { 3386 /* create new state */ 3387 struct pf_state *s; 3388 3389 s = pool_get(&pf_state_pl, PR_NOWAIT); 3390 if (s == NULL) 3391 return (PF_DROP); 3392 3393 s->rule = *rm; 3394 s->allow_opts = *rm && (*rm)->allow_opts; 3395 s->log = *rm && ((*rm)->log & 2); 3396 s->proto = pd->proto; 3397 s->direction = direction; 3398 s->af = af; 3399 if (direction == PF_OUT) { 3400 PF_ACPY(&s->gwy.addr, saddr, af); 3401 s->gwy.port = icmpid; 3402 PF_ACPY(&s->ext.addr, daddr, af); 3403 s->ext.port = icmpid; 3404 if (nat != NULL || binat != NULL) 3405 PF_ACPY(&s->lan.addr, &baddr, af); 3406 else 3407 PF_ACPY(&s->lan.addr, &s->gwy.addr, af); 3408 s->lan.port = icmpid; 
3409 } else { 3410 PF_ACPY(&s->lan.addr, daddr, af); 3411 s->lan.port = icmpid; 3412 PF_ACPY(&s->ext.addr, saddr, af); 3413 s->ext.port = icmpid; 3414 if (binat != NULL || rdr != NULL) 3415 PF_ACPY(&s->gwy.addr, &baddr, af); 3416 else 3417 PF_ACPY(&s->gwy.addr, &s->lan.addr, af); 3418 s->gwy.port = icmpid; 3419 } 3420 s->src.seqlo = 0; 3421 s->src.seqhi = 0; 3422 s->src.seqdiff = 0; 3423 s->src.max_win = 0; 3424 s->src.state = 0; 3425 s->dst.seqlo = 0; 3426 s->dst.seqhi = 0; 3427 s->dst.seqdiff = 0; 3428 s->dst.max_win = 0; 3429 s->dst.state = 0; 3430 s->creation = pftv.tv_sec; 3431 s->expire = pftv.tv_sec + pftm_icmp_first_packet; 3432 s->packets = 1; 3433 s->bytes = pd->tot_len; 3434 pf_insert_state(s); 3435 } 3436 3437 #ifdef INET6 3438 /* copy back packet headers if we performed IPv6 NAT operations */ 3439 if (rewrite) 3440 m_copyback(m, off, ICMP_MINLEN, 3441 (caddr_t)pd->hdr.icmp6); 3442 #endif /* INET6 */ 3443 3444 return (PF_PASS); 3445 } 3446 3447 int 3448 pf_test_other(struct pf_rule **rm, int direction, struct ifnet *ifp, 3449 struct mbuf *m, void *h, struct pf_pdesc *pd) 3450 { 3451 struct pf_rule *r; 3452 struct pf_nat *nat = NULL; 3453 struct pf_binat *binat = NULL; 3454 struct pf_rdr *rdr = NULL; 3455 struct pf_addr *saddr = pd->src, *daddr = pd->dst, baddr; 3456 u_int8_t af = pd->af; 3457 3458 *rm = NULL; 3459 3460 if (direction == PF_OUT) { 3461 /* check outgoing packet for BINAT */ 3462 if ((binat = pf_get_binat(PF_OUT, ifp, pd->proto, 3463 saddr, daddr, af)) != NULL) { 3464 PF_ACPY(&baddr, saddr, af); 3465 switch (af) { 3466 #ifdef INET 3467 case AF_INET: 3468 pf_change_a(&saddr->v4.s_addr, pd->ip_sum, 3469 binat->raddr.v4.s_addr, 0); 3470 break; 3471 #endif /* INET */ 3472 #ifdef INET6 3473 case AF_INET6: 3474 PF_ACPY(saddr, &binat->raddr, af); 3475 break; 3476 #endif /* INET6 */ 3477 } 3478 } 3479 /* check outgoing packet for NAT */ 3480 else if ((nat = pf_get_nat(ifp, pd->proto, 3481 saddr, daddr, af)) != NULL) { 3482 PF_ACPY(&baddr, saddr, af); 3483 switch (af) { 3484 #ifdef INET 3485 case AF_INET: 3486 pf_change_a(&saddr->v4.s_addr, 3487 pd->ip_sum, nat->raddr.v4.s_addr, 0); 3488 break; 3489 #endif /* INET */ 3490 #ifdef INET6 3491 case AF_INET6: 3492 PF_ACPY(saddr, &nat->raddr, af); 3493 break; 3494 #endif /* INET6 */ 3495 } 3496 } 3497 } else { 3498 /* check incoming packet for RDR */ 3499 if ((rdr = pf_get_rdr(ifp, pd->proto, 3500 saddr, daddr, 0, af)) != NULL) { 3501 PF_ACPY(&baddr, daddr, af); 3502 switch (af) { 3503 #ifdef INET 3504 case AF_INET: 3505 pf_change_a(&daddr->v4.s_addr, 3506 pd->ip_sum, rdr->raddr.v4.s_addr, 0); 3507 break; 3508 #endif /* INET */ 3509 #ifdef INET6 3510 case AF_INET6: 3511 PF_ACPY(daddr, &rdr->raddr, af); 3512 break; 3513 #endif /* INET6 */ 3514 } 3515 } 3516 /* check incoming packet for BINAT */ 3517 else if ((binat = pf_get_binat(PF_IN, ifp, pd->proto, 3518 daddr, saddr, af)) != NULL) { 3519 PF_ACPY(&baddr, daddr, af); 3520 switch (af) { 3521 #ifdef INET 3522 case AF_INET: 3523 pf_change_a(&daddr->v4.s_addr, 3524 pd->ip_sum, binat->saddr.v4.s_addr, 0); 3525 break; 3526 #endif /* INET */ 3527 #ifdef INET6 3528 case AF_INET6: 3529 PF_ACPY(daddr, &binat->saddr, af); 3530 break; 3531 #endif /* INET6 */ 3532 } 3533 } 3534 } 3535 3536 r = TAILQ_FIRST(pf_rules_active); 3537 while (r != NULL) { 3538 if (r->action == PF_SCRUB) { 3539 r = TAILQ_NEXT(r, entries); 3540 continue; 3541 } 3542 r->evaluations++; 3543 if (r->ifp != NULL && r->ifp != ifp) 3544 r = r->skip[PF_SKIP_IFP]; 3545 else if (r->af && r->af != af) 3546 r = 
r->skip[PF_SKIP_AF]; 3547 else if (r->proto && r->proto != pd->proto) 3548 r = r->skip[PF_SKIP_PROTO]; 3549 else if (!PF_AZERO(&r->src.mask, af) && !PF_MATCHA(r->src.not, 3550 &r->src.addr, &r->src.mask, pd->src, af)) 3551 r = r->skip[PF_SKIP_SRC_ADDR]; 3552 else if (!PF_AZERO(&r->dst.mask, af) && !PF_MATCHA(r->dst.not, 3553 &r->dst.addr, &r->dst.mask, pd->dst, af)) 3554 r = r->skip[PF_SKIP_DST_ADDR]; 3555 else if (r->direction != direction) 3556 r = TAILQ_NEXT(r, entries); 3557 else { 3558 *rm = r; 3559 if ((*rm)->quick) 3560 break; 3561 r = TAILQ_NEXT(r, entries); 3562 } 3563 } 3564 3565 if (*rm != NULL) { 3566 u_short reason; 3567 3568 (*rm)->packets++; 3569 (*rm)->bytes += pd->tot_len; 3570 REASON_SET(&reason, PFRES_MATCH); 3571 if ((*rm)->log) 3572 PFLOG_PACKET(ifp, h, m, af, direction, reason, *rm); 3573 3574 if ((*rm)->action != PF_PASS) 3575 return (PF_DROP); 3576 } 3577 3578 if ((*rm != NULL && (*rm)->keep_state) || nat != NULL || 3579 rdr != NULL || binat != NULL) { 3580 /* create new state */ 3581 struct pf_state *s; 3582 3583 s = pool_get(&pf_state_pl, PR_NOWAIT); 3584 if (s == NULL) 3585 return (PF_DROP); 3586 3587 s->rule = *rm; 3588 s->allow_opts = *rm && (*rm)->allow_opts; 3589 s->log = *rm && ((*rm)->log & 2); 3590 s->proto = pd->proto; 3591 s->direction = direction; 3592 s->af = af; 3593 if (direction == PF_OUT) { 3594 PF_ACPY(&s->gwy.addr, saddr, af); 3595 s->gwy.port = 0; 3596 PF_ACPY(&s->ext.addr, daddr, af); 3597 s->ext.port = 0; 3598 if (nat != NULL || binat != NULL) 3599 PF_ACPY(&s->lan.addr, &baddr, af); 3600 else 3601 PF_ACPY(&s->lan.addr, &s->gwy.addr, af); 3602 s->lan.port = 0; 3603 } else { 3604 PF_ACPY(&s->lan.addr, daddr, af); 3605 s->lan.port = 0; 3606 PF_ACPY(&s->ext.addr, saddr, af); 3607 s->ext.port = 0; 3608 if (binat != NULL || rdr != NULL) 3609 PF_ACPY(&s->gwy.addr, &baddr, af); 3610 else 3611 PF_ACPY(&s->gwy.addr, &s->lan.addr, af); 3612 s->gwy.port = 0; 3613 } 3614 s->src.seqlo = 0; 3615 s->src.seqhi = 0; 3616 s->src.seqdiff = 0; 3617 s->src.max_win = 0; 3618 s->src.state = 1; 3619 s->dst.seqlo = 0; 3620 s->dst.seqhi = 0; 3621 s->dst.seqdiff = 0; 3622 s->dst.max_win = 0; 3623 s->dst.state = 0; 3624 s->creation = pftv.tv_sec; 3625 s->expire = pftv.tv_sec + pftm_other_first_packet; 3626 s->packets = 1; 3627 s->bytes = pd->tot_len; 3628 pf_insert_state(s); 3629 } 3630 3631 return (PF_PASS); 3632 } 3633 3634 int 3635 pf_test_state_tcp(struct pf_state **state, int direction, struct ifnet *ifp, 3636 struct mbuf *m, int ipoff, int off, void *h, struct pf_pdesc *pd) 3637 { 3638 struct pf_tree_key key; 3639 struct tcphdr *th = pd->hdr.tcp; 3640 u_int16_t win = ntohs(th->th_win); 3641 u_int32_t ack, end, seq; 3642 int ackskew; 3643 struct pf_state_peer *src, *dst; 3644 3645 key.af = pd->af; 3646 key.proto = IPPROTO_TCP; 3647 PF_ACPY(&key.addr[0], pd->src, key.af); 3648 PF_ACPY(&key.addr[1], pd->dst, key.af); 3649 key.port[0] = th->th_sport; 3650 key.port[1] = th->th_dport; 3651 3652 if (direction == PF_IN) 3653 *state = pf_find_state(tree_ext_gwy, &key); 3654 else 3655 *state = pf_find_state(tree_lan_ext, &key); 3656 if (*state == NULL) 3657 return (PF_DROP); 3658 3659 if (direction == (*state)->direction) { 3660 src = &(*state)->src; 3661 dst = &(*state)->dst; 3662 } else { 3663 src = &(*state)->dst; 3664 dst = &(*state)->src; 3665 } 3666 3667 /* 3668 * Sequence tracking algorithm from Guido van Rooij's paper: 3669 * http://www.madison-gurkha.com/publications/tcp_filtering/ 3670 * tcp_filtering.ps 3671 */ 3672 3673 seq = ntohl(th->th_seq); 3674 if (src->seqlo 
== 0) { 3675 /* First packet from this end. Set its state */ 3676 3677 /* Deferred generation of sequence number modulator */ 3678 if (dst->seqdiff) { 3679 while ((src->seqdiff = arc4random()) == 0) 3680 ; 3681 ack = ntohl(th->th_ack) - dst->seqdiff; 3682 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 3683 src->seqdiff), 0); 3684 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 3685 } else { 3686 ack = ntohl(th->th_ack); 3687 } 3688 3689 end = seq + pd->p_len; 3690 if (th->th_flags & TH_SYN) 3691 end++; 3692 if (th->th_flags & TH_FIN) 3693 end++; 3694 3695 src->seqlo = seq; 3696 if (src->state < TCPS_SYN_SENT) 3697 src->state = TCPS_SYN_SENT; 3698 3699 /* 3700 * May need to slide the window (seqhi may have been set by 3701 * the crappy stack check or if we picked up the connection 3702 * after establishment) 3703 */ 3704 if (SEQ_GEQ(end + MAX(1, dst->max_win), src->seqhi)) 3705 src->seqhi = end + MAX(1, dst->max_win); 3706 if (win > src->max_win) 3707 src->max_win = win; 3708 3709 } else { 3710 ack = ntohl(th->th_ack) - dst->seqdiff; 3711 if (src->seqdiff) { 3712 /* Modulate sequence numbers */ 3713 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 3714 src->seqdiff), 0); 3715 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 3716 } 3717 end = seq + pd->p_len; 3718 if (th->th_flags & TH_SYN) 3719 end++; 3720 if (th->th_flags & TH_FIN) 3721 end++; 3722 } 3723 3724 if ((th->th_flags & TH_ACK) == 0) { 3725 /* Let it pass through the ack skew check */ 3726 ack = dst->seqlo; 3727 } else if ((ack == 0 && 3728 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || 3729 /* broken tcp stacks do not set ack */ 3730 (dst->state < TCPS_SYN_SENT)) { 3731 /* Many stacks (ours included) will set the ACK number in an 3732 * FIN|ACK if the SYN times out -- no sequence to ACK. 
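		 * Substituting dst->seqlo makes ackskew below work out to 0,
		 * so these packets still pass the acking-window checks.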
3733 */ 3734 ack = dst->seqlo; 3735 } 3736 3737 if (seq == end) { 3738 /* Ease sequencing restrictions on no data packets */ 3739 seq = src->seqlo; 3740 end = seq; 3741 } 3742 3743 ackskew = dst->seqlo - ack; 3744 3745 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 3746 if (SEQ_GEQ(src->seqhi, end) && 3747 /* Last octet inside other's window space */ 3748 SEQ_GEQ(seq, src->seqlo - dst->max_win) && 3749 /* Retrans: not more than one window back */ 3750 (ackskew >= -MAXACKWINDOW) && 3751 /* Acking not more than one window back */ 3752 (ackskew <= MAXACKWINDOW)) { 3753 /* Acking not more than one window forward */ 3754 3755 (*state)->packets++; 3756 (*state)->bytes += pd->tot_len; 3757 3758 /* update max window */ 3759 if (src->max_win < win) 3760 src->max_win = win; 3761 /* syncronize sequencing */ 3762 if (SEQ_GT(end, src->seqlo)) 3763 src->seqlo = end; 3764 /* slide the window of what the other end can send */ 3765 if (SEQ_GEQ(ack + win, dst->seqhi)) 3766 dst->seqhi = ack + MAX(win, 1); 3767 3768 3769 /* update states */ 3770 if (th->th_flags & TH_SYN) 3771 if (src->state < TCPS_SYN_SENT) 3772 src->state = TCPS_SYN_SENT; 3773 if (th->th_flags & TH_FIN) 3774 if (src->state < TCPS_CLOSING) 3775 src->state = TCPS_CLOSING; 3776 if (th->th_flags & TH_ACK) { 3777 if (dst->state == TCPS_SYN_SENT) 3778 dst->state = TCPS_ESTABLISHED; 3779 else if (dst->state == TCPS_CLOSING) 3780 dst->state = TCPS_FIN_WAIT_2; 3781 } 3782 if (th->th_flags & TH_RST) 3783 src->state = dst->state = TCPS_TIME_WAIT; 3784 3785 /* update expire time */ 3786 if (src->state >= TCPS_FIN_WAIT_2 && 3787 dst->state >= TCPS_FIN_WAIT_2) 3788 (*state)->expire = pftv.tv_sec + pftm_tcp_closed; 3789 else if (src->state >= TCPS_FIN_WAIT_2 || 3790 dst->state >= TCPS_FIN_WAIT_2) 3791 (*state)->expire = pftv.tv_sec + pftm_tcp_fin_wait; 3792 else if (src->state >= TCPS_CLOSING || 3793 dst->state >= TCPS_CLOSING) 3794 (*state)->expire = pftv.tv_sec + pftm_tcp_closing; 3795 else if (src->state < TCPS_ESTABLISHED || 3796 dst->state < TCPS_ESTABLISHED) 3797 (*state)->expire = pftv.tv_sec + pftm_tcp_opening; 3798 else 3799 (*state)->expire = pftv.tv_sec + pftm_tcp_established; 3800 3801 /* Fall through to PASS packet */ 3802 3803 } else if ((dst->state < TCPS_SYN_SENT || 3804 dst->state >= TCPS_FIN_WAIT_2 || 3805 src->state >= TCPS_FIN_WAIT_2) && 3806 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 3807 /* Within a window forward of the originating packet */ 3808 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 3809 /* Within a window backward of the originating packet */ 3810 3811 /* 3812 * This currently handles three situations: 3813 * 1) Stupid stacks will shotgun SYNs before their peer 3814 * replies. 3815 * 2) When PF catches an already established stream (the 3816 * firewall rebooted, the state table was flushed, routes 3817 * changed...) 3818 * 3) Packets get funky immediately after the connection 3819 * closes (this should catch Solaris spurious ACK|FINs 3820 * that web servers like to spew after a close) 3821 * 3822 * This must be a little more careful than the above code 3823 * since packet floods will also be caught here. We don't 3824 * update the TTL here to mitigate the damage of a packet 3825 * flood and so the same code can handle awkward establishment 3826 * and a loosened connection close. 3827 * In the establishment case, a correct peer response will 3828 * validate the connection, go through the normal state code 3829 * and keep updating the state TTL. 
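		 * ("TTL" here means the state expiry timeout,
		 * (*state)->expire, not the IP header TTL.)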
3830 */ 3831 3832 if (pf_status.debug >= PF_DEBUG_MISC) { 3833 printf("pf: loose state match: "); 3834 pf_print_state(*state); 3835 pf_print_flags(th->th_flags); 3836 printf(" seq=%lu ack=%lu len=%u ackskew=%d pkts=%d\n", 3837 seq, ack, pd->p_len, ackskew, (*state)->packets); 3838 } 3839 3840 (*state)->packets++; 3841 (*state)->bytes += pd->tot_len; 3842 3843 /* update max window */ 3844 if (src->max_win < win) 3845 src->max_win = win; 3846 /* syncronize sequencing */ 3847 if (SEQ_GT(end, src->seqlo)) 3848 src->seqlo = end; 3849 /* slide the window of what the other end can send */ 3850 if (SEQ_GEQ(ack + win, dst->seqhi)) 3851 dst->seqhi = ack + MAX(win, 1); 3852 3853 /* 3854 * Cannot set dst->seqhi here since this could be a shotgunned 3855 * SYN and not an already established connection. 3856 */ 3857 3858 if (th->th_flags & TH_FIN) 3859 if (src->state < TCPS_CLOSING) 3860 src->state = TCPS_CLOSING; 3861 if (th->th_flags & TH_RST) 3862 src->state = dst->state = TCPS_TIME_WAIT; 3863 3864 /* Fall through to PASS packet */ 3865 3866 } else { 3867 if (pf_status.debug >= PF_DEBUG_MISC) { 3868 printf("pf: BAD state: "); 3869 pf_print_state(*state); 3870 pf_print_flags(th->th_flags); 3871 printf(" seq=%lu ack=%lu len=%u ackskew=%d pkts=%d " 3872 "dir=%s,%s\n", seq, ack, pd->p_len, ackskew, 3873 ++(*state)->packets, 3874 direction == PF_IN ? "in" : "out", 3875 direction == (*state)->direction ? "fwd" : "rev"); 3876 printf("pf: State failure on: %c %c %c %c | %c %c\n", 3877 SEQ_GEQ(src->seqhi, end) ? ' ' : '1', 3878 SEQ_GEQ(seq, src->seqlo - dst->max_win) ? ' ': '2', 3879 (ackskew >= -MAXACKWINDOW) ? ' ' : '3', 3880 (ackskew <= MAXACKWINDOW) ? ' ' : '4', 3881 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', 3882 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6'); 3883 } 3884 return (PF_DROP); 3885 } 3886 3887 /* Any packets which have gotten here are to be passed */ 3888 3889 /* translate source/destination address, if needed */ 3890 if (STATE_TRANSLATE(*state)) { 3891 if (direction == PF_OUT) 3892 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 3893 &th->th_sum, &(*state)->gwy.addr, 3894 (*state)->gwy.port, 0, pd->af); 3895 else 3896 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 3897 &th->th_sum, &(*state)->lan.addr, 3898 (*state)->lan.port, 0, pd->af); 3899 m_copyback(m, off, sizeof(*th), (caddr_t)th); 3900 } else if (src->seqdiff) { 3901 /* Copyback sequence modulation */ 3902 m_copyback(m, off, sizeof(*th), (caddr_t)th); 3903 } 3904 3905 if ((*state)->rule != NULL) { 3906 (*state)->rule->packets++; 3907 (*state)->rule->bytes += pd->tot_len; 3908 } 3909 return (PF_PASS); 3910 } 3911 3912 int 3913 pf_test_state_udp(struct pf_state **state, int direction, struct ifnet *ifp, 3914 struct mbuf *m, int ipoff, int off, void *h, struct pf_pdesc *pd) 3915 { 3916 struct pf_state_peer *src, *dst; 3917 struct pf_tree_key key; 3918 struct udphdr *uh = pd->hdr.udp; 3919 3920 key.af = pd->af; 3921 key.proto = IPPROTO_UDP; 3922 PF_ACPY(&key.addr[0], pd->src, key.af); 3923 PF_ACPY(&key.addr[1], pd->dst, key.af); 3924 key.port[0] = pd->hdr.udp->uh_sport; 3925 key.port[1] = pd->hdr.udp->uh_dport; 3926 3927 if (direction == PF_IN) 3928 *state = pf_find_state(tree_ext_gwy, &key); 3929 else 3930 *state = pf_find_state(tree_lan_ext, &key); 3931 if (*state == NULL) 3932 return (PF_DROP); 3933 3934 if (direction == (*state)->direction) { 3935 src = &(*state)->src; 3936 dst = &(*state)->dst; 3937 } else { 3938 src = &(*state)->dst; 3939 dst = &(*state)->src; 3940 } 3941 3942 (*state)->packets++; 3943 
(*state)->bytes += pd->tot_len; 3944 3945 /* update states */ 3946 if (src->state < 1) 3947 src->state = 1; 3948 if (dst->state == 1) 3949 dst->state = 2; 3950 3951 /* update expire time */ 3952 if (src->state == 2 && dst->state == 2) 3953 (*state)->expire = pftv.tv_sec + pftm_udp_multiple; 3954 else 3955 (*state)->expire = pftv.tv_sec + pftm_udp_single; 3956 3957 /* translate source/destination address, if necessary */ 3958 if (STATE_TRANSLATE(*state)) { 3959 if (direction == PF_OUT) 3960 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 3961 &uh->uh_sum, &(*state)->gwy.addr, 3962 (*state)->gwy.port, 1, pd->af); 3963 else 3964 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 3965 &uh->uh_sum, &(*state)->lan.addr, 3966 (*state)->lan.port, 1, pd->af); 3967 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 3968 } 3969 3970 if ((*state)->rule != NULL) { 3971 (*state)->rule->packets++; 3972 (*state)->rule->bytes += pd->tot_len; 3973 } 3974 return (PF_PASS); 3975 } 3976 3977 int 3978 pf_test_state_icmp(struct pf_state **state, int direction, struct ifnet *ifp, 3979 struct mbuf *m, int ipoff, int off, void *h, struct pf_pdesc *pd) 3980 { 3981 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 3982 u_int16_t icmpid, *icmpsum; 3983 u_int8_t icmptype; 3984 int state_icmp = 0; 3985 3986 switch (pd->proto) { 3987 #ifdef INET 3988 case IPPROTO_ICMP: 3989 icmptype = pd->hdr.icmp->icmp_type; 3990 icmpid = pd->hdr.icmp->icmp_id; 3991 icmpsum = &pd->hdr.icmp->icmp_cksum; 3992 3993 if (icmptype == ICMP_UNREACH || 3994 icmptype == ICMP_SOURCEQUENCH || 3995 icmptype == ICMP_REDIRECT || 3996 icmptype == ICMP_TIMXCEED || 3997 icmptype == ICMP_PARAMPROB) 3998 state_icmp++; 3999 break; 4000 #endif /* INET */ 4001 #ifdef INET6 4002 case IPPROTO_ICMPV6: 4003 icmptype = pd->hdr.icmp6->icmp6_type; 4004 icmpid = pd->hdr.icmp6->icmp6_id; 4005 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 4006 4007 if (icmptype == ICMP6_DST_UNREACH || 4008 icmptype == ICMP6_PACKET_TOO_BIG || 4009 icmptype == ICMP6_TIME_EXCEEDED || 4010 icmptype == ICMP6_PARAM_PROB) 4011 state_icmp++; 4012 break; 4013 #endif /* INET6 */ 4014 } 4015 4016 if (!state_icmp) { 4017 4018 /* 4019 * ICMP query/reply message not related to a TCP/UDP packet. 4020 * Search for an ICMP state. 
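		 * E.g. an echo reply carries the same icmp_id as the request
		 * that created the state, so keying both ports on icmpid
		 * below matches the state from the reply direction as well.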
4021 */ 4022 struct pf_tree_key key; 4023 4024 key.af = pd->af; 4025 key.proto = pd->proto; 4026 PF_ACPY(&key.addr[0], saddr, key.af); 4027 PF_ACPY(&key.addr[1], daddr, key.af); 4028 key.port[0] = icmpid; 4029 key.port[1] = icmpid; 4030 4031 if (direction == PF_IN) 4032 *state = pf_find_state(tree_ext_gwy, &key); 4033 else 4034 *state = pf_find_state(tree_lan_ext, &key); 4035 if (*state == NULL) 4036 return (PF_DROP); 4037 4038 (*state)->packets++; 4039 (*state)->bytes += pd->tot_len; 4040 (*state)->expire = pftv.tv_sec + pftm_icmp_error_reply; 4041 4042 /* translate source/destination address, if needed */ 4043 if (PF_ANEQ(&(*state)->lan.addr, &(*state)->gwy.addr, pd->af)) { 4044 if (direction == PF_OUT) { 4045 switch (pd->af) { 4046 #ifdef INET 4047 case AF_INET: 4048 pf_change_a(&saddr->v4.s_addr, 4049 pd->ip_sum, 4050 (*state)->gwy.addr.v4.s_addr, 0); 4051 break; 4052 #endif /* INET */ 4053 #ifdef INET6 4054 case AF_INET6: 4055 pf_change_a6(saddr, 4056 &pd->hdr.icmp6->icmp6_cksum, 4057 &(*state)->gwy.addr, 0); 4058 m_copyback(m, off, ICMP_MINLEN, 4059 (caddr_t)pd->hdr.icmp6); 4060 break; 4061 #endif /* INET6 */ 4062 } 4063 } else { 4064 switch (pd->af) { 4065 #ifdef INET 4066 case AF_INET: 4067 pf_change_a(&daddr->v4.s_addr, 4068 pd->ip_sum, 4069 (*state)->lan.addr.v4.s_addr, 0); 4070 break; 4071 #endif /* INET */ 4072 #ifdef INET6 4073 case AF_INET6: 4074 pf_change_a6(daddr, 4075 &pd->hdr.icmp6->icmp6_cksum, 4076 &(*state)->lan.addr, 0); 4077 m_copyback(m, off, ICMP_MINLEN, 4078 (caddr_t)pd->hdr.icmp6); 4079 break; 4080 #endif /* INET6 */ 4081 } 4082 } 4083 } 4084 4085 return (PF_PASS); 4086 4087 } else { 4088 /* 4089 * ICMP error message in response to a TCP/UDP packet. 4090 * Extract the inner TCP/UDP header and search for that state. 4091 */ 4092 4093 struct pf_pdesc pd2; 4094 #ifdef INET 4095 struct ip h2; 4096 #endif /* INET */ 4097 #ifdef INET6 4098 struct ip6_hdr h2_6; 4099 int terminal = 0; 4100 #endif /* INET6 */ 4101 int ipoff2; 4102 int off2; 4103 4104 pd2.af = pd->af; 4105 switch (pd->af) { 4106 #ifdef INET 4107 case AF_INET: 4108 /* offset of h2 in mbuf chain */ 4109 ipoff2 = off + ICMP_MINLEN; 4110 4111 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 4112 NULL, NULL, pd2.af)) { 4113 DPFPRINTF(PF_DEBUG_MISC, 4114 ("pf: ICMP error message too short (ip)\n")); 4115 return (PF_DROP); 4116 } 4117 /* ICMP error messages don't refer to non-first fragments */ 4118 if (ntohs(h2.ip_off) & IP_OFFMASK) 4119 return (PF_DROP); 4120 4121 /* offset of protocol header that follows h2 */ 4122 off2 = ipoff2 + (h2.ip_hl << 2); 4123 4124 pd2.proto = h2.ip_p; 4125 pd2.src = (struct pf_addr *)&h2.ip_src; 4126 pd2.dst = (struct pf_addr *)&h2.ip_dst; 4127 pd2.ip_sum = &h2.ip_sum; 4128 break; 4129 #endif /* INET */ 4130 #ifdef INET6 4131 case AF_INET6: 4132 ipoff2 = off + sizeof(struct icmp6_hdr); 4133 4134 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 4135 NULL, NULL, pd2.af)) { 4136 DPFPRINTF(PF_DEBUG_MISC, 4137 ("pf: ICMP error message too short (ip6)\n")); 4138 return (PF_DROP); 4139 } 4140 pd2.proto = h2_6.ip6_nxt; 4141 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 4142 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 4143 pd2.ip_sum = NULL; 4144 off2 = ipoff2 + sizeof(h2_6); 4145 do { 4146 switch (pd2.proto) { 4147 case IPPROTO_FRAGMENT: 4148 /* XXX we don't handle fagments yet */ 4149 return (PF_DROP); 4150 case IPPROTO_AH: 4151 case IPPROTO_HOPOPTS: 4152 case IPPROTO_ROUTING: 4153 case IPPROTO_DSTOPTS: { 4154 /* get next header and header length */ 4155 struct _opt6 opt6; 4156 4157 if 
(!pf_pull_hdr(m, off2, &opt6, 4158 sizeof(opt6), NULL, NULL, pd2.af)) { 4159 DPFPRINTF(PF_DEBUG_MISC, 4160 ("pf: ICMPv6 short opt\n")); 4161 return(PF_DROP); 4162 } 4163 pd2.proto = opt6.opt6_nxt; 4164 off2 += (opt6.opt6_hlen + 1) * 8; 4165 /* goto the next header */ 4166 break; 4167 } 4168 default: 4169 terminal++; 4170 break; 4171 } 4172 } while (!terminal); 4173 break; 4174 #endif /* INET6 */ 4175 } 4176 4177 switch (pd2.proto) { 4178 case IPPROTO_TCP: { 4179 struct tcphdr th; 4180 u_int32_t seq; 4181 struct pf_tree_key key; 4182 struct pf_state_peer *src, *dst; 4183 4184 /* 4185 * Only the first 8 bytes of the TCP header can be 4186 * expected. Don't access any TCP header fields after 4187 * th_seq, an ackskew test is not possible. 4188 */ 4189 if (!pf_pull_hdr(m, off2, &th, 8, NULL, NULL, pd2.af)) { 4190 DPFPRINTF(PF_DEBUG_MISC, 4191 ("pf: ICMP error message too short (tcp)\n")); 4192 return (PF_DROP); 4193 } 4194 4195 key.af = pd2.af; 4196 key.proto = IPPROTO_TCP; 4197 PF_ACPY(&key.addr[0], pd2.dst, pd2.af); 4198 key.port[0] = th.th_dport; 4199 PF_ACPY(&key.addr[1], pd2.src, pd2.af); 4200 key.port[1] = th.th_sport; 4201 4202 if (direction == PF_IN) 4203 *state = pf_find_state(tree_ext_gwy, &key); 4204 else 4205 *state = pf_find_state(tree_lan_ext, &key); 4206 if (*state == NULL) 4207 return (PF_DROP); 4208 4209 if (direction == (*state)->direction) { 4210 src = &(*state)->dst; 4211 dst = &(*state)->src; 4212 } else { 4213 src = &(*state)->src; 4214 dst = &(*state)->dst; 4215 } 4216 4217 /* Demodulate sequence number */ 4218 seq = ntohl(th.th_seq) - src->seqdiff; 4219 if (src->seqdiff) 4220 pf_change_a(&th.th_seq, &th.th_sum, 4221 htonl(seq), 0); 4222 4223 if (!SEQ_GEQ(src->seqhi, seq) || 4224 !SEQ_GEQ(seq, src->seqlo - dst->max_win)) { 4225 if (pf_status.debug >= PF_DEBUG_MISC) { 4226 printf("pf: BAD ICMP state: "); 4227 pf_print_state(*state); 4228 printf(" seq=%lu\n", seq); 4229 } 4230 return (PF_DROP); 4231 } 4232 4233 if (STATE_TRANSLATE(*state)) { 4234 if (direction == PF_IN) { 4235 pf_change_icmp(pd2.src, &th.th_sport, 4236 saddr, &(*state)->lan.addr, 4237 (*state)->lan.port, NULL, 4238 pd2.ip_sum, icmpsum, 4239 pd->ip_sum, 0, pd2.af); 4240 } else { 4241 pf_change_icmp(pd2.dst, &th.th_dport, 4242 saddr, &(*state)->gwy.addr, 4243 (*state)->gwy.port, NULL, 4244 pd2.ip_sum, icmpsum, 4245 pd->ip_sum, 0, pd2.af); 4246 } 4247 switch (pd2.af) { 4248 #ifdef INET 4249 case AF_INET: 4250 m_copyback(m, off, ICMP_MINLEN, 4251 (caddr_t)pd->hdr.icmp); 4252 m_copyback(m, ipoff2, sizeof(h2), 4253 (caddr_t)&h2); 4254 break; 4255 #endif /* INET */ 4256 #ifdef INET6 4257 case AF_INET6: 4258 m_copyback(m, off, ICMP_MINLEN, 4259 (caddr_t)pd->hdr.icmp6); 4260 m_copyback(m, ipoff2, sizeof(h2_6), 4261 (caddr_t)&h2_6); 4262 break; 4263 #endif /* INET6 */ 4264 } 4265 m_copyback(m, off2, 8, (caddr_t)&th); 4266 } else if (src->seqdiff) { 4267 m_copyback(m, off2, 8, (caddr_t)&th); 4268 } 4269 4270 return (PF_PASS); 4271 break; 4272 } 4273 case IPPROTO_UDP: { 4274 struct udphdr uh; 4275 struct pf_tree_key key; 4276 4277 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 4278 NULL, NULL, pd2.af)) { 4279 DPFPRINTF(PF_DEBUG_MISC, 4280 ("pf: ICMP error message too short (udp)\n")); 4281 return (PF_DROP); 4282 } 4283 4284 key.af = pd2.af; 4285 key.proto = IPPROTO_UDP; 4286 PF_ACPY(&key.addr[0], pd2.dst, pd2.af); 4287 key.port[0] = uh.uh_dport; 4288 PF_ACPY(&key.addr[1], pd2.src, pd2.af); 4289 key.port[1] = uh.uh_sport; 4290 4291 if (direction == PF_IN) 4292 *state = pf_find_state(tree_ext_gwy, &key); 4293 else 4294 
*state = pf_find_state(tree_lan_ext, &key); 4295 if (*state == NULL) 4296 return (PF_DROP); 4297 4298 if (STATE_TRANSLATE(*state)) { 4299 if (direction == PF_IN) { 4300 pf_change_icmp(pd2.src, &uh.uh_sport, 4301 daddr, &(*state)->lan.addr, 4302 (*state)->lan.port, &uh.uh_sum, 4303 pd2.ip_sum, icmpsum, 4304 pd->ip_sum, 1, pd2.af); 4305 } else { 4306 pf_change_icmp(pd2.dst, &uh.uh_dport, 4307 saddr, &(*state)->gwy.addr, 4308 (*state)->gwy.port, &uh.uh_sum, 4309 pd2.ip_sum, icmpsum, 4310 pd->ip_sum, 1, pd2.af); 4311 } 4312 switch (pd2.af) { 4313 #ifdef INET 4314 case AF_INET: 4315 m_copyback(m, off, ICMP_MINLEN, 4316 (caddr_t)pd->hdr.icmp); 4317 m_copyback(m, ipoff2, sizeof(h2), 4318 (caddr_t)&h2); 4319 break; 4320 #endif /* INET */ 4321 #ifdef INET6 4322 case AF_INET6: 4323 m_copyback(m, off, ICMP_MINLEN, 4324 (caddr_t)pd->hdr.icmp6); 4325 m_copyback(m, ipoff2, sizeof(h2_6), 4326 (caddr_t)&h2_6); 4327 break; 4328 #endif /* INET6 */ 4329 } 4330 m_copyback(m, off2, sizeof(uh), 4331 (caddr_t)&uh); 4332 } 4333 4334 return (PF_PASS); 4335 break; 4336 } 4337 #ifdef INET 4338 case IPPROTO_ICMP: { 4339 struct icmp iih; 4340 struct pf_tree_key key; 4341 4342 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 4343 NULL, NULL, pd2.af)) { 4344 DPFPRINTF(PF_DEBUG_MISC, 4345 ("pf: ICMP error message too short (icmp)\n")); 4346 return (PF_DROP); 4347 } 4348 4349 key.af = pd2.af; 4350 key.proto = IPPROTO_ICMP; 4351 PF_ACPY(&key.addr[0], pd2.dst, pd2.af); 4352 key.port[0] = iih.icmp_id; 4353 PF_ACPY(&key.addr[1], pd2.src, pd2.af); 4354 key.port[1] = iih.icmp_id; 4355 4356 if (direction == PF_IN) 4357 *state = pf_find_state(tree_ext_gwy, &key); 4358 else 4359 *state = pf_find_state(tree_lan_ext, &key); 4360 if (*state == NULL) 4361 return (PF_DROP); 4362 4363 if (STATE_TRANSLATE(*state)) { 4364 if (direction == PF_IN) { 4365 pf_change_icmp(pd2.src, &iih.icmp_id, 4366 daddr, &(*state)->lan.addr, 4367 (*state)->lan.port, NULL, 4368 pd2.ip_sum, icmpsum, 4369 pd->ip_sum, 0, AF_INET); 4370 } else { 4371 pf_change_icmp(pd2.dst, &iih.icmp_id, 4372 saddr, &(*state)->gwy.addr, 4373 (*state)->gwy.port, NULL, 4374 pd2.ip_sum, icmpsum, 4375 pd->ip_sum, 0, AF_INET); 4376 } 4377 m_copyback(m, off, ICMP_MINLEN, 4378 (caddr_t)pd->hdr.icmp); 4379 m_copyback(m, ipoff2, sizeof(h2), 4380 (caddr_t)&h2); 4381 m_copyback(m, off2, ICMP_MINLEN, 4382 (caddr_t)&iih); 4383 } 4384 4385 return (PF_PASS); 4386 break; 4387 } 4388 #endif /* INET */ 4389 #ifdef INET6 4390 case IPPROTO_ICMPV6: { 4391 struct icmp6_hdr iih; 4392 struct pf_tree_key key; 4393 4394 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 4395 NULL, NULL, pd2.af)) { 4396 DPFPRINTF(PF_DEBUG_MISC, 4397 ("pf: ICMP error message too short (icmp6)\n")); 4398 return (PF_DROP); 4399 } 4400 4401 key.af = pd2.af; 4402 key.proto = IPPROTO_ICMPV6; 4403 PF_ACPY(&key.addr[0], pd2.dst, pd2.af); 4404 key.port[0] = iih.icmp6_id; 4405 PF_ACPY(&key.addr[1], pd2.src, pd2.af); 4406 key.port[1] = iih.icmp6_id; 4407 4408 if (direction == PF_IN) 4409 *state = pf_find_state(tree_ext_gwy, &key); 4410 else 4411 *state = pf_find_state(tree_lan_ext, &key); 4412 if (*state == NULL) 4413 return (PF_DROP); 4414 4415 if (STATE_TRANSLATE(*state)) { 4416 if (direction == PF_IN) { 4417 pf_change_icmp(pd2.src, &iih.icmp6_id, 4418 daddr, &(*state)->lan.addr, 4419 (*state)->lan.port, NULL, 4420 pd2.ip_sum, icmpsum, 4421 pd->ip_sum, 0, AF_INET6); 4422 } else { 4423 pf_change_icmp(pd2.dst, &iih.icmp6_id, 4424 saddr, &(*state)->gwy.addr, 4425 (*state)->gwy.port, NULL, 4426 pd2.ip_sum, icmpsum, 4427 pd->ip_sum, 0, AF_INET6); 
4428 } 4429 m_copyback(m, off, ICMP_MINLEN, 4430 (caddr_t)pd->hdr.icmp6); 4431 m_copyback(m, ipoff2, sizeof(h2_6), 4432 (caddr_t)&h2_6); 4433 m_copyback(m, off2, ICMP_MINLEN, 4434 (caddr_t)&iih); 4435 } 4436 4437 return (PF_PASS); 4438 break; 4439 } 4440 #endif /* INET6 */ 4441 default: 4442 DPFPRINTF(PF_DEBUG_MISC, 4443 ("pf: ICMP error message for bad proto\n")); 4444 return (PF_DROP); 4445 } 4446 4447 } 4448 } 4449 4450 int 4451 pf_test_state_other(struct pf_state **state, int direction, struct ifnet *ifp, 4452 struct pf_pdesc *pd) 4453 { 4454 struct pf_state_peer *src, *dst; 4455 struct pf_tree_key key; 4456 4457 key.af = pd->af; 4458 key.proto = pd->proto; 4459 PF_ACPY(&key.addr[0], pd->src, key.af); 4460 PF_ACPY(&key.addr[1], pd->dst, key.af); 4461 key.port[0] = 0; 4462 key.port[1] = 0; 4463 4464 if (direction == PF_IN) 4465 *state = pf_find_state(tree_ext_gwy, &key); 4466 else 4467 *state = pf_find_state(tree_lan_ext, &key); 4468 if (*state == NULL) 4469 return (PF_DROP); 4470 4471 if (direction == (*state)->direction) { 4472 src = &(*state)->src; 4473 dst = &(*state)->dst; 4474 } else { 4475 src = &(*state)->dst; 4476 dst = &(*state)->src; 4477 } 4478 4479 (*state)->packets++; 4480 (*state)->bytes += pd->tot_len; 4481 4482 /* update states */ 4483 if (src->state < 1) 4484 src->state = 1; 4485 if (dst->state == 1) 4486 dst->state = 2; 4487 4488 /* update expire time */ 4489 if (src->state == 2 && dst->state == 2) 4490 (*state)->expire = pftv.tv_sec + pftm_other_multiple; 4491 else 4492 (*state)->expire = pftv.tv_sec + pftm_other_single; 4493 4494 /* translate source/destination address, if necessary */ 4495 if (STATE_TRANSLATE(*state)) { 4496 if (direction == PF_OUT) 4497 switch (pd->af) { 4498 #ifdef INET 4499 case AF_INET: 4500 pf_change_a(&pd->src->v4.s_addr, 4501 pd->ip_sum, (*state)->gwy.addr.v4.s_addr, 0); 4502 break; 4503 #endif /* INET */ 4504 #ifdef INET6 4505 case AF_INET6: 4506 PF_ACPY(pd->src, &(*state)->gwy.addr, pd->af); 4507 break; 4508 #endif /* INET6 */ 4509 } 4510 else 4511 switch (pd->af) { 4512 #ifdef INET 4513 case AF_INET: 4514 pf_change_a(&pd->dst->v4.s_addr, 4515 pd->ip_sum, (*state)->lan.addr.v4.s_addr, 0); 4516 break; 4517 #endif /* INET */ 4518 #ifdef INET6 4519 case AF_INET6: 4520 PF_ACPY(pd->dst, &(*state)->lan.addr, pd->af); 4521 break; 4522 #endif /* INET6 */ 4523 } 4524 } 4525 4526 if ((*state)->rule != NULL) { 4527 (*state)->rule->packets++; 4528 (*state)->rule->bytes += pd->tot_len; 4529 } 4530 return (PF_PASS); 4531 } 4532 4533 /* 4534 * ipoff and off are measured from the start of the mbuf chain. 4535 * h must be at "ipoff" on the mbuf chain. 
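 * Returns NULL when the requested header cannot be pulled (the packet
 * is a non-first fragment, or is shorter than off + len); in that case
 * the action and, where applicable, the reason are reported through the
 * optional actionp/reasonp pointers.  On success the header is copied
 * into p and p is returned.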
4536 */ 4537 void * 4538 pf_pull_hdr(struct mbuf *m, int off, void *p, int len, 4539 u_short *actionp, u_short *reasonp, int af) 4540 { 4541 switch (af) { 4542 #ifdef INET 4543 case AF_INET: { 4544 struct ip *h = mtod(m, struct ip *); 4545 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3; 4546 4547 if (fragoff) { 4548 if (fragoff >= len) 4549 ACTION_SET(actionp, PF_PASS); 4550 else { 4551 ACTION_SET(actionp, PF_DROP); 4552 REASON_SET(reasonp, PFRES_FRAG); 4553 } 4554 return (NULL); 4555 } 4556 if (m->m_pkthdr.len < off + len || h->ip_len < off + len) { 4557 ACTION_SET(actionp, PF_DROP); 4558 REASON_SET(reasonp, PFRES_SHORT); 4559 return (NULL); 4560 } 4561 break; 4562 } 4563 #endif /* INET */ 4564 #ifdef INET6 4565 case AF_INET6: { 4566 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 4567 if (m->m_pkthdr.len < off + len || 4568 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < off + len) { 4569 ACTION_SET(actionp, PF_DROP); 4570 REASON_SET(reasonp, PFRES_SHORT); 4571 return (NULL); 4572 } 4573 break; 4574 } 4575 #endif /* INET6 */ 4576 } 4577 m_copydata(m, off, len, p); 4578 return (p); 4579 } 4580 4581 #ifdef INET 4582 void 4583 pf_route(struct mbuf *m, struct pf_rule *r) 4584 { 4585 struct mbuf *m0, *m1; 4586 struct route iproute; 4587 struct route *ro; 4588 struct sockaddr_in *dst; 4589 struct ip *ip, *mhip; 4590 struct ifnet *ifp = r->rt_ifp; 4591 int hlen; 4592 int len, off, error = 0; 4593 4594 if (m == NULL) 4595 return; 4596 4597 if (r->rt == PF_DUPTO) { 4598 m0 = m_copym2(m, 0, M_COPYALL, M_NOWAIT); 4599 if (m0 == NULL) 4600 return; 4601 } else 4602 m0 = m; 4603 4604 ip = mtod(m0, struct ip *); 4605 hlen = ip->ip_hl << 2; 4606 4607 ro = &iproute; 4608 bzero((caddr_t)ro, sizeof(*ro)); 4609 dst = satosin(&ro->ro_dst); 4610 dst->sin_family = AF_INET; 4611 dst->sin_len = sizeof(*dst); 4612 dst->sin_addr = ip->ip_dst; 4613 4614 if (r->rt == PF_FASTROUTE) { 4615 rtalloc(ro); 4616 if (ro->ro_rt == 0) { 4617 ipstat.ips_noroute++; 4618 goto bad; 4619 } 4620 4621 ifp = ro->ro_rt->rt_ifp; 4622 ro->ro_rt->rt_use++; 4623 4624 if (ro->ro_rt->rt_flags & RTF_GATEWAY) 4625 dst = satosin(ro->ro_rt->rt_gateway); 4626 } else { 4627 if (!PF_AZERO(&r->rt_addr, AF_INET)) 4628 dst->sin_addr.s_addr = r->rt_addr.v4.s_addr; 4629 } 4630 4631 if (ifp == NULL) 4632 goto bad; 4633 4634 /* Copied from ip_output. */ 4635 if ((u_int16_t)ip->ip_len <= ifp->if_mtu) { 4636 ip->ip_len = htons((u_int16_t)ip->ip_len); 4637 ip->ip_off = htons((u_int16_t)ip->ip_off); 4638 if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) && 4639 ifp->if_bridge == NULL) { 4640 m0->m_pkthdr.csum |= M_IPV4_CSUM_OUT; 4641 ipstat.ips_outhwcsum++; 4642 } else { 4643 ip->ip_sum = 0; 4644 ip->ip_sum = in_cksum(m0, hlen); 4645 } 4646 /* Update relevant hardware checksum stats for TCP/UDP */ 4647 if (m0->m_pkthdr.csum & M_TCPV4_CSUM_OUT) 4648 tcpstat.tcps_outhwcsum++; 4649 else if (m0->m_pkthdr.csum & M_UDPV4_CSUM_OUT) 4650 udpstat.udps_outhwcsum++; 4651 error = (*ifp->if_output)(ifp, m0, sintosa(dst), NULL); 4652 goto done; 4653 } 4654 4655 /* 4656 * Too large for interface; fragment if possible. 4657 * Must be able to put at least 8 bytes per fragment. 4658 */ 4659 if (ip->ip_off & IP_DF) { 4660 error = EMSGSIZE; 4661 ipstat.ips_cantfrag++; 4662 goto bad; 4663 } 4664 len = (ifp->if_mtu - hlen) &~ 7; 4665 if (len < 8) { 4666 error = EMSGSIZE; 4667 goto bad; 4668 } 4669 /* 4670 * If we are doing fragmentation, we can't defer TCP/UDP 4671 * checksumming; compute the checksum and clear the flag. 
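 * Only the first fragment carries the TCP/UDP header, so a deferred
 * checksum could no longer be computed once the payload is split.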
4672 */ 4673 if (m0->m_pkthdr.csum & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT)) { 4674 in_delayed_cksum(m0); 4675 m0->m_pkthdr.csum &= ~(M_UDPV4_CSUM_OUT | M_TCPV4_CSUM_OUT); 4676 } 4677 4678 { 4679 int mhlen, firstlen = len; 4680 struct mbuf **mnext = &m0->m_nextpkt; 4681 4682 /* 4683 * Loop through length of segment after first fragment, 4684 * make new header and copy data of each part and link onto chain. 4685 */ 4686 m1 = m0; 4687 mhlen = sizeof (struct ip); 4688 for (off = hlen + len; off < (u_int16_t)ip->ip_len; off += len) { 4689 MGETHDR(m0, M_DONTWAIT, MT_HEADER); 4690 if (m0 == 0) { 4691 error = ENOBUFS; 4692 ipstat.ips_odropped++; 4693 goto sendorfree; 4694 } 4695 *mnext = m0; 4696 mnext = &m0->m_nextpkt; 4697 m0->m_data += max_linkhdr; 4698 mhip = mtod(m0, struct ip *); 4699 *mhip = *ip; 4700 /* we must inherit MCAST and BCAST flags */ 4701 m0->m_flags |= m1->m_flags & (M_MCAST|M_BCAST); 4702 if (hlen > sizeof (struct ip)) { 4703 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip); 4704 mhip->ip_hl = mhlen >> 2; 4705 } 4706 m0->m_len = mhlen; 4707 mhip->ip_off = ((off - hlen) >> 3) + (ip->ip_off & ~IP_MF); 4708 if (ip->ip_off & IP_MF) 4709 mhip->ip_off |= IP_MF; 4710 if (off + len >= (u_int16_t)ip->ip_len) 4711 len = (u_int16_t)ip->ip_len - off; 4712 else 4713 mhip->ip_off |= IP_MF; 4714 mhip->ip_len = htons((u_int16_t)(len + mhlen)); 4715 m0->m_next = m_copy(m1, off, len); 4716 if (m0->m_next == 0) { 4717 error = ENOBUFS;/* ??? */ 4718 ipstat.ips_odropped++; 4719 goto sendorfree; 4720 } 4721 m0->m_pkthdr.len = mhlen + len; 4722 m0->m_pkthdr.rcvif = (struct ifnet *)0; 4723 mhip->ip_off = htons((u_int16_t)mhip->ip_off); 4724 if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) && 4725 ifp->if_bridge == NULL) { 4726 m0->m_pkthdr.csum |= M_IPV4_CSUM_OUT; 4727 ipstat.ips_outhwcsum++; 4728 } else { 4729 mhip->ip_sum = 0; 4730 mhip->ip_sum = in_cksum(m0, mhlen); 4731 } 4732 ipstat.ips_ofragments++; 4733 } 4734 /* 4735 * Update first fragment by trimming what's been copied out 4736 * and updating header, then send each fragment (in order). 
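 * If output fails for one fragment, the remaining fragments on the
 * m_nextpkt chain are freed instead of being sent.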
4737 */ 4738 m0 = m1; 4739 m_adj(m0, hlen + firstlen - (u_int16_t)ip->ip_len); 4740 m0->m_pkthdr.len = hlen + firstlen; 4741 ip->ip_len = htons((u_int16_t)m0->m_pkthdr.len); 4742 ip->ip_off = htons((u_int16_t)(ip->ip_off | IP_MF)); 4743 if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) && 4744 ifp->if_bridge == NULL) { 4745 m0->m_pkthdr.csum |= M_IPV4_CSUM_OUT; 4746 ipstat.ips_outhwcsum++; 4747 } else { 4748 ip->ip_sum = 0; 4749 ip->ip_sum = in_cksum(m0, hlen); 4750 } 4751 sendorfree: 4752 for (m0 = m1; m0; m0 = m1) { 4753 m1 = m0->m_nextpkt; 4754 m0->m_nextpkt = 0; 4755 if (error == 0) 4756 error = (*ifp->if_output)(ifp, m0, sintosa(dst), 4757 NULL); 4758 else 4759 m_freem(m0); 4760 } 4761 4762 if (error == 0) 4763 ipstat.ips_fragmented++; 4764 } 4765 4766 done: 4767 if (ro == &iproute && ro->ro_rt) 4768 RTFREE(ro->ro_rt); 4769 return; 4770 4771 bad: 4772 m_freem(m0); 4773 goto done; 4774 } 4775 #endif /* INET */ 4776 4777 #ifdef INET6 4778 void 4779 pf_route6(struct mbuf *m, struct pf_rule *r) 4780 { 4781 struct mbuf *m0; 4782 struct m_tag *mtag; 4783 struct route_in6 ip6route; 4784 struct route_in6 *ro; 4785 struct sockaddr_in6 *dst; 4786 struct ip6_hdr *ip6; 4787 struct ifnet *ifp = r->rt_ifp; 4788 int error = 0; 4789 4790 if (m == NULL) 4791 return; 4792 4793 if (r->rt == PF_DUPTO) { 4794 m0 = m_copym2(m, 0, M_COPYALL, M_NOWAIT); 4795 if (m0 == NULL) 4796 return; 4797 } else 4798 m0 = m; 4799 4800 ip6 = mtod(m0, struct ip6_hdr *); 4801 4802 ro = &ip6route; 4803 bzero((caddr_t)ro, sizeof(*ro)); 4804 dst = (struct sockaddr_in6 *)&ro->ro_dst; 4805 dst->sin6_family = AF_INET6; 4806 dst->sin6_len = sizeof(*dst); 4807 dst->sin6_addr = ip6->ip6_dst; 4808 4809 if (!PF_AZERO(&r->rt_addr, AF_INET6)) 4810 dst->sin6_addr = r->rt_addr.v6; 4811 4812 /* Cheat. */ 4813 if (r->rt == PF_FASTROUTE) { 4814 mtag = m_tag_get(PACKET_TAG_PF_GENERATED, 0, M_NOWAIT); 4815 if (mtag == NULL) 4816 goto bad; 4817 m_tag_prepend(m0, mtag); 4818 ip6_output(m0, NULL, NULL, NULL, NULL, NULL); 4819 return; 4820 } 4821 4822 if (ifp == NULL) 4823 goto bad; 4824 4825 /* 4826 * Do not fragment packets (yet). Not much is done here for dealing 4827 * with errors. Actions on errors depend on whether the packet 4828 * was generated locally or being forwarded. 
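 * Packets larger than the interface MTU are simply dropped here; no
 * ICMPv6 packet-too-big message is generated.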
4829 */ 4830 if (m0->m_pkthdr.len <= ifp->if_mtu) { 4831 error = (*ifp->if_output)(ifp, m0, (struct sockaddr *)dst, 4832 NULL); 4833 } else 4834 m_freem(m0); 4835 4836 done: 4837 return; 4838 4839 bad: 4840 m_freem(m0); 4841 goto done; 4842 } 4843 #endif /* INET6 */ 4844 4845 #ifdef INET 4846 int 4847 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0) 4848 { 4849 u_short action, reason = 0, log = 0; 4850 struct mbuf *m = *m0; 4851 struct ip *h; 4852 struct pf_rule *r = NULL; 4853 struct pf_state *s = NULL; 4854 struct pf_pdesc pd; 4855 int off; 4856 4857 if (!pf_status.running || 4858 (m_tag_find(m, PACKET_TAG_PF_GENERATED, NULL) != NULL)) 4859 return (PF_PASS); 4860 4861 #ifdef DIAGNOSTIC 4862 if ((m->m_flags & M_PKTHDR) == 0) 4863 panic("non-M_PKTHDR is passed to pf_test"); 4864 #endif 4865 4866 /* purge expire states */ 4867 microtime(&pftv); 4868 if (pftv.tv_sec - pf_last_purge >= pftm_interval) { 4869 pf_purge_expired_states(); 4870 pf_purge_expired_fragments(); 4871 pf_last_purge = pftv.tv_sec; 4872 } 4873 4874 if (m->m_pkthdr.len < sizeof(*h)) { 4875 action = PF_DROP; 4876 REASON_SET(&reason, PFRES_SHORT); 4877 log = 1; 4878 goto done; 4879 } 4880 4881 /* We do IP header normalization and packet reassembly here */ 4882 if (pf_normalize_ip(m0, dir, ifp, &reason) != PF_PASS) { 4883 ACTION_SET(&action, PF_DROP); 4884 goto done; 4885 } 4886 m = *m0; 4887 h = mtod(m, struct ip *); 4888 4889 off = h->ip_hl << 2; 4890 if (off < sizeof(*h)) { 4891 action = PF_DROP; 4892 REASON_SET(&reason, PFRES_SHORT); 4893 log = 1; 4894 goto done; 4895 } 4896 4897 pd.src = (struct pf_addr *)&h->ip_src; 4898 pd.dst = (struct pf_addr *)&h->ip_dst; 4899 pd.ip_sum = &h->ip_sum; 4900 pd.proto = h->ip_p; 4901 pd.af = AF_INET; 4902 pd.tot_len = h->ip_len; 4903 4904 switch (h->ip_p) { 4905 4906 case IPPROTO_TCP: { 4907 struct tcphdr th; 4908 pd.hdr.tcp = &th; 4909 4910 if (!pf_pull_hdr(m, off, &th, sizeof(th), 4911 &action, &reason, AF_INET)) { 4912 log = action != PF_PASS; 4913 goto done; 4914 } 4915 pd.p_len = pd.tot_len - off - (th.th_off << 2); 4916 action = pf_normalize_tcp(dir, ifp, m, 0, off, h, &pd); 4917 if (action == PF_DROP) 4918 break; 4919 action = pf_test_state_tcp(&s, dir, ifp, m, 0, off, h, &pd); 4920 if (action == PF_PASS) { 4921 r = s->rule; 4922 log = s->log; 4923 } else if (s == NULL) 4924 action = pf_test_tcp(&r, dir, ifp, m, 0, off, h, &pd); 4925 break; 4926 } 4927 4928 case IPPROTO_UDP: { 4929 struct udphdr uh; 4930 pd.hdr.udp = &uh; 4931 4932 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 4933 &action, &reason, AF_INET)) { 4934 log = action != PF_PASS; 4935 goto done; 4936 } 4937 action = pf_test_state_udp(&s, dir, ifp, m, 0, off, h, &pd); 4938 if (action == PF_PASS) { 4939 r = s->rule; 4940 log = s->log; 4941 } else if (s == NULL) 4942 action = pf_test_udp(&r, dir, ifp, m, 0, off, h, &pd); 4943 break; 4944 } 4945 4946 case IPPROTO_ICMP: { 4947 struct icmp ih; 4948 pd.hdr.icmp = &ih; 4949 4950 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN, 4951 &action, &reason, AF_INET)) { 4952 log = action != PF_PASS; 4953 goto done; 4954 } 4955 action = pf_test_state_icmp(&s, dir, ifp, m, 0, off, h, &pd); 4956 if (action == PF_PASS) { 4957 r = s->rule; 4958 if (r != NULL) { 4959 r->packets++; 4960 r->bytes += h->ip_len; 4961 } 4962 log = s->log; 4963 } else if (s == NULL) 4964 action = pf_test_icmp(&r, dir, ifp, m, 0, off, h, &pd); 4965 break; 4966 } 4967 4968 default: 4969 action = pf_test_state_other(&s, dir, ifp, &pd); 4970 if (action == PF_PASS) { 4971 r = s->rule; 4972 log = s->log; 4973 } else if (s == 
NULL) 4974 action = pf_test_other(&r, dir, ifp, m, h, &pd); 4975 break; 4976 } 4977 4978 if (ifp == status_ifp) { 4979 pf_status.bcounters[0][dir] += pd.tot_len; 4980 pf_status.pcounters[0][dir][action]++; 4981 } 4982 4983 done: 4984 if (action != PF_DROP && h->ip_hl > 5 && 4985 !((s && s->allow_opts) || (r && r->allow_opts))) { 4986 action = PF_DROP; 4987 REASON_SET(&reason, PFRES_SHORT); 4988 log = 1; 4989 DPFPRINTF(PF_DEBUG_MISC, 4990 ("pf: dropping packet with ip options\n")); 4991 } 4992 4993 if (log) { 4994 if (r == NULL) { 4995 struct pf_rule r0; 4996 r0.ifp = ifp; 4997 r0.action = action; 4998 r0.nr = -1; 4999 PFLOG_PACKET(ifp, h, m, AF_INET, dir, reason, &r0); 5000 } else 5001 PFLOG_PACKET(ifp, h, m, AF_INET, dir, reason, r); 5002 } 5003 5004 /* pf_route can free the mbuf causing *m to become NULL */ 5005 if (r && r->rt) { 5006 pf_route(m, r); 5007 if (r->rt != PF_DUPTO) { 5008 /* m0 already freed */ 5009 *m0 = NULL; 5010 } 5011 } 5012 5013 return (action); 5014 } 5015 #endif /* INET */ 5016 5017 #ifdef INET6 5018 int 5019 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0) 5020 { 5021 u_short action, reason = 0, log = 0; 5022 struct mbuf *m = *m0; 5023 struct ip6_hdr *h; 5024 struct pf_rule *r = NULL; 5025 struct pf_state *s = NULL; 5026 struct pf_pdesc pd; 5027 int off, terminal = 0; 5028 5029 if (!pf_status.running || 5030 (m_tag_find(m, PACKET_TAG_PF_GENERATED, NULL) != NULL)) 5031 return (PF_PASS); 5032 5033 #ifdef DIAGNOSTIC 5034 if ((m->m_flags & M_PKTHDR) == 0) 5035 panic("non-M_PKTHDR is passed to pf_test"); 5036 #endif 5037 5038 /* purge expire states */ 5039 microtime(&pftv); 5040 if (pftv.tv_sec - pf_last_purge >= pftm_interval) { 5041 pf_purge_expired_states(); 5042 pf_purge_expired_fragments(); 5043 pf_last_purge = pftv.tv_sec; 5044 } 5045 5046 if (m->m_pkthdr.len < sizeof(*h)) { 5047 action = PF_DROP; 5048 REASON_SET(&reason, PFRES_SHORT); 5049 log = 1; 5050 goto done; 5051 } 5052 5053 m = *m0; 5054 h = mtod(m, struct ip6_hdr *); 5055 5056 pd.src = (struct pf_addr *)&h->ip6_src; 5057 pd.dst = (struct pf_addr *)&h->ip6_dst; 5058 pd.ip_sum = NULL; 5059 pd.af = AF_INET6; 5060 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); 5061 5062 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr); 5063 pd.proto = h->ip6_nxt; 5064 do { 5065 switch (pd.proto) { 5066 case IPPROTO_FRAGMENT: 5067 /* XXX we don't handle fragments yet */ 5068 action = PF_DROP; 5069 REASON_SET(&reason, PFRES_FRAG); 5070 goto done; 5071 case IPPROTO_AH: 5072 case IPPROTO_HOPOPTS: 5073 case IPPROTO_ROUTING: 5074 case IPPROTO_DSTOPTS: { 5075 /* get next header and header length */ 5076 struct _opt6 opt6; 5077 5078 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6), 5079 NULL, NULL, pd.af)) { 5080 DPFPRINTF(PF_DEBUG_MISC, 5081 ("pf: IPv6 short opt\n")); 5082 action = PF_DROP; 5083 REASON_SET(&reason, PFRES_SHORT); 5084 log = 1; 5085 goto done; 5086 } 5087 pd.proto = opt6.opt6_nxt; 5088 off += (opt6.opt6_hlen + 1) * 8; 5089 /* goto the next header */ 5090 break; 5091 } 5092 default: 5093 terminal++; 5094 break; 5095 } 5096 } while (!terminal); 5097 5098 switch (pd.proto) { 5099 5100 case IPPROTO_TCP: { 5101 struct tcphdr th; 5102 pd.hdr.tcp = &th; 5103 5104 if (!pf_pull_hdr(m, off, &th, sizeof(th), 5105 &action, &reason, AF_INET6)) { 5106 log = action != PF_PASS; 5107 goto done; 5108 } 5109 pd.p_len = pd.tot_len - off - (th.th_off << 2); 5110 action = pf_normalize_tcp(dir, ifp, m, 0, off, h, &pd); 5111 if (action == PF_DROP) 5112 break; 5113 action = pf_test_state_tcp(&s, dir, ifp, m, 0, off, h, 
&pd); 5114 if (action == PF_PASS) { 5115 r = s->rule; 5116 log = s->log; 5117 } else if (s == NULL) 5118 action = pf_test_tcp(&r, dir, ifp, m, 0, off, h, &pd); 5119 break; 5120 } 5121 5122 case IPPROTO_UDP: { 5123 struct udphdr uh; 5124 pd.hdr.udp = &uh; 5125 5126 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 5127 &action, &reason, AF_INET6)) { 5128 log = action != PF_PASS; 5129 goto done; 5130 } 5131 action = pf_test_state_udp(&s, dir, ifp, m, 0, off, h, &pd); 5132 if (action == PF_PASS) { 5133 r = s->rule; 5134 log = s->log; 5135 } else if (s == NULL) 5136 action = pf_test_udp(&r, dir, ifp, m, 0, off, h, &pd); 5137 break; 5138 } 5139 5140 case IPPROTO_ICMPV6: { 5141 struct icmp6_hdr ih; 5142 pd.hdr.icmp6 = &ih; 5143 5144 if (!pf_pull_hdr(m, off, &ih, sizeof(ih), 5145 &action, &reason, AF_INET6)) { 5146 log = action != PF_PASS; 5147 goto done; 5148 } 5149 action = pf_test_state_icmp(&s, dir, ifp, m, 0, off, h, &pd); 5150 if (action == PF_PASS) { 5151 r = s->rule; 5152 if (r != NULL) { 5153 r->packets++; 5154 r->bytes += h->ip6_plen; 5155 } 5156 log = s->log; 5157 } else if (s == NULL) 5158 action = pf_test_icmp(&r, dir, ifp, m, 0, off, h, &pd); 5159 break; 5160 } 5161 5162 default: 5163 action = pf_test_other(&r, dir, ifp, m, h, &pd); 5164 break; 5165 } 5166 5167 if (ifp == status_ifp) { 5168 pf_status.bcounters[1][dir] += h->ip6_plen; 5169 pf_status.pcounters[1][dir][action]++; 5170 } 5171 5172 done: 5173 /* XXX handle IPv6 options, if not allowed. not implemented. */ 5174 5175 if (log) { 5176 if (r == NULL) { 5177 struct pf_rule r0; 5178 r0.ifp = ifp; 5179 r0.action = action; 5180 r0.nr = -1; 5181 PFLOG_PACKET(ifp, h, m, AF_INET6, dir, reason, &r0); 5182 } else 5183 PFLOG_PACKET(ifp, h, m, AF_INET6, dir, reason, r); 5184 } 5185 5186 /* pf_route6 can free the mbuf causing *m to become NULL */ 5187 if (r && r->rt) { 5188 pf_route6(m, r); 5189 if (r->rt != PF_DUPTO) { 5190 /* m0 already freed */ 5191 *m0 = NULL; 5192 } 5193 } 5194 5195 return (action); 5196 } 5197 #endif /* INET6 */ 5198