1 /* $OpenBSD: pf.c,v 1.614 2008/08/02 12:34:37 henning Exp $ */ 2 3 /* 4 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 5 * 6 * Copyright (c) 2001 Daniel Hartmeier 7 * Copyright (c) 2002 - 2008 Henning Brauer 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * - Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * - Redistributions in binary form must reproduce the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer in the documentation and/or other materials provided 19 * with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 * 34 * Effort sponsored in part by the Defense Advanced Research Projects 35 * Agency (DARPA) and Air Force Research Laboratory, Air Force 36 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 37 * 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_inet6.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/malloc.h> 46 #include <sys/mbuf.h> 47 #include <sys/filio.h> 48 #include <sys/socket.h> 49 #include <sys/socketvar.h> 50 #include <sys/kernel.h> 51 #include <sys/time.h> 52 #include <sys/sysctl.h> 53 #include <sys/endian.h> 54 #include <sys/proc.h> 55 #include <sys/kthread.h> 56 57 #include <machine/inttypes.h> 58 59 #include <sys/md5.h> 60 61 #include <net/if.h> 62 #include <net/if_types.h> 63 #include <net/bpf.h> 64 #include <net/netisr.h> 65 #include <net/route.h> 66 67 #include <netinet/in.h> 68 #include <netinet/in_var.h> 69 #include <netinet/in_systm.h> 70 #include <netinet/ip.h> 71 #include <netinet/ip_var.h> 72 #include <netinet/tcp.h> 73 #include <netinet/tcp_seq.h> 74 #include <netinet/udp.h> 75 #include <netinet/ip_icmp.h> 76 #include <netinet/in_pcb.h> 77 #include <netinet/tcp_timer.h> 78 #include <netinet/tcp_var.h> 79 #include <netinet/udp_var.h> 80 #include <netinet/icmp_var.h> 81 #include <netinet/if_ether.h> 82 83 #include <net/pf/pfvar.h> 84 #include <net/pf/if_pflog.h> 85 86 #include <net/pf/if_pfsync.h> 87 88 #ifdef INET6 89 #include <netinet/ip6.h> 90 #include <netinet/in_pcb.h> 91 #include <netinet/icmp6.h> 92 #include <netinet6/nd6.h> 93 #include <netinet6/ip6_var.h> 94 #include <netinet6/in6_pcb.h> 95 #endif /* INET6 */ 96 97 #include <sys/in_cksum.h> 98 #include <sys/ucred.h> 99 #include <machine/limits.h> 100 #include <sys/msgport2.h> 101 #include <net/netmsg2.h> 102 103 extern int ip_optcopy(struct ip *, struct ip *); 104 extern int debug_pfugidhack; 105 106 struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token); 107 108 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x 109 110 /* 111 * Global variables 112 */ 113 114 /* mask radix tree */ 115 struct radix_node_head *pf_maskhead; 116 117 /* state tables */ 118 struct pf_state_tree pf_statetbl; 119 120 struct pf_altqqueue pf_altqs[2]; 121 struct pf_palist pf_pabuf; 122 struct pf_altqqueue *pf_altqs_active; 123 struct pf_altqqueue *pf_altqs_inactive; 124 struct pf_status pf_status; 125 126 u_int32_t ticket_altqs_active; 127 u_int32_t ticket_altqs_inactive; 128 int altqs_inactive_open; 129 u_int32_t ticket_pabuf; 130 131 MD5_CTX pf_tcp_secret_ctx; 132 u_char pf_tcp_secret[16]; 133 int pf_tcp_secret_init; 134 int pf_tcp_iss_off; 135 136 struct pf_anchor_stackframe { 137 struct pf_ruleset *rs; 138 struct pf_rule *r; 139 struct pf_anchor_node *parent; 140 struct pf_anchor *child; 141 } pf_anchor_stack[64]; 142 143 struct malloc_type *pf_src_tree_pl, *pf_rule_pl, *pf_pooladdr_pl; 144 struct malloc_type *pf_state_pl, *pf_state_key_pl, *pf_state_item_pl; 145 struct malloc_type *pf_altq_pl; 146 147 void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t); 148 149 void pf_init_threshold(struct pf_threshold *, u_int32_t, 150 u_int32_t); 151 void pf_add_threshold(struct pf_threshold *); 152 int pf_check_threshold(struct pf_threshold *); 153 154 void pf_change_ap(struct pf_addr *, u_int16_t *, 155 u_int16_t *, u_int16_t *, struct pf_addr *, 156 u_int16_t, u_int8_t, sa_family_t); 157 int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *, 158 struct tcphdr *, struct pf_state_peer *); 159 #ifdef INET6 160 void pf_change_a6(struct pf_addr *, u_int16_t *, 161 struct pf_addr *, u_int8_t); 162 #endif /* INET6 */ 163 void pf_change_icmp(struct pf_addr *, u_int16_t *, 164 struct pf_addr *, struct pf_addr *, u_int16_t, 165 u_int16_t *, u_int16_t *, u_int16_t *, 166 u_int16_t *, u_int8_t, sa_family_t); 167 void pf_send_tcp(const struct pf_rule *, sa_family_t, 168 const struct pf_addr *, const struct pf_addr *, 169 u_int16_t, u_int16_t, u_int32_t, u_int32_t, 170 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int, 171 u_int16_t, struct ether_header *, struct ifnet *); 172 void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t, 173 sa_family_t, struct pf_rule *); 174 struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *, 175 int, int, struct pfi_kif *, 176 struct pf_addr *, u_int16_t, struct pf_addr *, 177 u_int16_t, int); 178 struct pf_rule *pf_get_translation(struct pf_pdesc *, struct mbuf *, 179 int, int, struct pfi_kif *, struct pf_src_node **, 180 struct pf_state_key **, struct pf_state_key **, 181 struct pf_state_key **, struct pf_state_key **, 182 struct pf_addr *, struct pf_addr *, 183 u_int16_t, u_int16_t); 184 void pf_detach_state(struct pf_state *); 185 int pf_state_key_setup(struct pf_pdesc *, struct pf_rule *, 186 struct pf_state_key **, struct pf_state_key **, 187 struct pf_state_key **, struct pf_state_key **, 188 struct pf_addr *, struct pf_addr *, 189 u_int16_t, u_int16_t); 190 void pf_state_key_detach(struct pf_state *, int); 191 u_int32_t pf_tcp_iss(struct pf_pdesc *); 192 int pf_test_rule(struct pf_rule **, struct pf_state **, 193 int, struct pfi_kif *, struct mbuf *, int, 194 void *, struct pf_pdesc *, struct pf_rule **, 195 struct pf_ruleset **, struct ifqueue *, struct inpcb *); 196 static __inline int pf_create_state(struct pf_rule *, struct pf_rule *, 197 struct pf_rule *, struct pf_pdesc *, 198 struct pf_src_node *, struct pf_state_key *, 199 struct pf_state_key *, struct pf_state_key *, 200 struct pf_state_key *, struct mbuf *, int, 201 u_int16_t, u_int16_t, int *, struct pfi_kif *, 202 struct pf_state **, int, u_int16_t, u_int16_t, 203 int); 204 int pf_test_fragment(struct pf_rule **, int, 205 struct pfi_kif *, struct mbuf *, void *, 206 struct pf_pdesc *, struct pf_rule **, 207 struct pf_ruleset **); 208 int pf_tcp_track_full(struct pf_state_peer *, 209 struct pf_state_peer *, struct pf_state **, 210 struct pfi_kif *, struct mbuf *, int, 211 struct pf_pdesc *, u_short *, int *); 212 int pf_tcp_track_sloppy(struct pf_state_peer *, 213 struct pf_state_peer *, struct pf_state **, 214 struct pf_pdesc *, u_short *); 215 int pf_test_state_tcp(struct pf_state **, int, 216 struct pfi_kif *, struct mbuf *, int, 217 void *, struct pf_pdesc *, u_short *); 218 int pf_test_state_udp(struct pf_state **, int, 219 struct pfi_kif *, struct mbuf *, int, 220 void *, struct pf_pdesc *); 221 int pf_test_state_icmp(struct pf_state **, int, 222 struct pfi_kif *, struct mbuf *, int, 223 void *, struct pf_pdesc *, u_short *); 224 int pf_test_state_other(struct pf_state **, int, 225 struct pfi_kif *, struct mbuf *, struct pf_pdesc *); 226 void pf_step_into_anchor(int *, struct pf_ruleset **, int, 227 struct pf_rule **, struct pf_rule **, int *); 228 int pf_step_out_of_anchor(int *, struct pf_ruleset **, 229 int, struct pf_rule **, struct pf_rule **, 230 int *); 231 void pf_hash(struct pf_addr *, struct pf_addr *, 232 struct pf_poolhashkey *, sa_family_t); 233 int pf_map_addr(u_int8_t, struct pf_rule *, 234 struct pf_addr *, struct pf_addr *, 235 struct pf_addr *, struct pf_src_node **); 236 int pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *, 237 struct pf_addr *, struct pf_addr *, u_int16_t, 238 struct pf_addr *, u_int16_t*, u_int16_t, u_int16_t, 239 struct pf_src_node **); 240 void pf_route(struct mbuf **, struct pf_rule *, int, 241 struct ifnet *, struct pf_state *, 242 struct pf_pdesc *); 243 void pf_route6(struct mbuf **, struct pf_rule *, int, 244 struct ifnet *, struct pf_state *, 245 struct pf_pdesc *); 246 u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t, 247 sa_family_t); 248 u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t, 249 sa_family_t); 250 u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, 251 u_int16_t); 252 void pf_set_rt_ifp(struct pf_state *, 253 struct pf_addr *); 254 int pf_check_proto_cksum(struct mbuf *, int, int, 255 u_int8_t, sa_family_t); 256 struct pf_divert *pf_get_divert(struct mbuf *); 257 void pf_print_state_parts(struct pf_state *, 258 struct pf_state_key *, struct pf_state_key *); 259 int pf_addr_wrap_neq(struct pf_addr_wrap *, 260 struct pf_addr_wrap *); 261 struct pf_state *pf_find_state(struct pfi_kif *, 262 struct pf_state_key_cmp *, u_int, struct mbuf *); 263 int pf_src_connlimit(struct pf_state **); 264 int pf_check_congestion(struct ifqueue *); 265 266 extern int pf_end_threads; 267 268 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = { 269 { &pf_state_pl, PFSTATE_HIWAT }, 270 { &pf_src_tree_pl, PFSNODE_HIWAT }, 271 { &pf_frent_pl, PFFRAG_FRENT_HIWAT }, 272 { &pfr_ktable_pl, PFR_KTABLE_HIWAT }, 273 { &pfr_kentry_pl, PFR_KENTRY_HIWAT } 274 }; 275 276 #define STATE_LOOKUP(i, k, d, s, m) \ 277 do { \ 278 s = pf_find_state(i, k, d, m); \ 279 if (s == NULL || (s)->timeout == PFTM_PURGE) \ 280 return (PF_DROP); \ 281 if (d == PF_OUT && \ 282 (((s)->rule.ptr->rt == PF_ROUTETO && \ 283 (s)->rule.ptr->direction == PF_OUT) || \ 284 ((s)->rule.ptr->rt == PF_REPLYTO && \ 285 (s)->rule.ptr->direction == PF_IN)) && \ 286 (s)->rt_kif != NULL && \ 287 (s)->rt_kif != i) \ 288 return (PF_PASS); \ 289 } while (0) 290 291 #define BOUND_IFACE(r, k) \ 292 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all 293 294 #define STATE_INC_COUNTERS(s) \ 295 do { \ 296 s->rule.ptr->states_cur++; \ 297 s->rule.ptr->states_tot++; \ 298 if (s->anchor.ptr != NULL) { \ 299 s->anchor.ptr->states_cur++; \ 300 s->anchor.ptr->states_tot++; \ 301 } \ 302 if (s->nat_rule.ptr != NULL) { \ 303 s->nat_rule.ptr->states_cur++; \ 304 s->nat_rule.ptr->states_tot++; \ 305 } \ 306 } while (0) 307 308 #define STATE_DEC_COUNTERS(s) \ 309 do { \ 310 if (s->nat_rule.ptr != NULL) \ 311 s->nat_rule.ptr->states_cur--; \ 312 if (s->anchor.ptr != NULL) \ 313 s->anchor.ptr->states_cur--; \ 314 s->rule.ptr->states_cur--; \ 315 } while (0) 316 317 static MALLOC_DEFINE(M_PFSTATEPL, "pfstatepl", "pf state pool list"); 318 static MALLOC_DEFINE(M_PFSRCTREEPL, "pfsrctpl", "pf source tree pool list"); 319 static MALLOC_DEFINE(M_PFSTATEKEYPL, "pfstatekeypl", "pf state key pool list"); 320 static MALLOC_DEFINE(M_PFSTATEITEMPL, "pfstateitempl", "pf state item pool list"); 321 322 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *); 323 static __inline int pf_state_compare_key(struct pf_state_key *, 324 struct pf_state_key *); 325 static __inline int pf_state_compare_id(struct pf_state *, 326 struct pf_state *); 327 328 struct pf_src_tree tree_src_tracking; 329 330 struct pf_state_tree_id tree_id; 331 struct pf_state_queue state_list; 332 333 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare); 334 RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key); 335 RB_GENERATE(pf_state_tree_id, pf_state, 336 entry_id, pf_state_compare_id); 337 338 static __inline int 339 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b) 340 { 341 int diff; 342 343 if (a->rule.ptr > b->rule.ptr) 344 return (1); 345 if (a->rule.ptr < b->rule.ptr) 346 return (-1); 347 if ((diff = a->af - b->af) != 0) 348 return (diff); 349 switch (a->af) { 350 #ifdef INET 351 case AF_INET: 352 if (a->addr.addr32[0] > b->addr.addr32[0]) 353 return (1); 354 if (a->addr.addr32[0] < b->addr.addr32[0]) 355 return (-1); 356 break; 357 #endif /* INET */ 358 #ifdef INET6 359 case AF_INET6: 360 if (a->addr.addr32[3] > b->addr.addr32[3]) 361 return (1); 362 if (a->addr.addr32[3] < b->addr.addr32[3]) 363 return (-1); 364 if (a->addr.addr32[2] > b->addr.addr32[2]) 365 return (1); 366 if (a->addr.addr32[2] < b->addr.addr32[2]) 367 return (-1); 368 if (a->addr.addr32[1] > b->addr.addr32[1]) 369 return (1); 370 if (a->addr.addr32[1] < b->addr.addr32[1]) 371 return (-1); 372 if (a->addr.addr32[0] > b->addr.addr32[0]) 373 return (1); 374 if (a->addr.addr32[0] < b->addr.addr32[0]) 375 return (-1); 376 break; 377 #endif /* INET6 */ 378 } 379 return (0); 380 } 381 382 u_int32_t 383 pf_state_hash(struct pf_state_key *sk) 384 { 385 u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15)); 386 if (hv == 0) /* disallow 0 */ 387 hv = 1; 388 return(hv); 389 } 390 391 #ifdef INET6 392 void 393 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af) 394 { 395 switch (af) { 396 #ifdef INET 397 case AF_INET: 398 dst->addr32[0] = src->addr32[0]; 399 break; 400 #endif /* INET */ 401 case AF_INET6: 402 dst->addr32[0] = src->addr32[0]; 403 dst->addr32[1] = src->addr32[1]; 404 dst->addr32[2] = src->addr32[2]; 405 dst->addr32[3] = src->addr32[3]; 406 break; 407 } 408 } 409 #endif /* INET6 */ 410 411 void 412 pf_init_threshold(struct pf_threshold *threshold, 413 u_int32_t limit, u_int32_t seconds) 414 { 415 threshold->limit = limit * PF_THRESHOLD_MULT; 416 threshold->seconds = seconds; 417 threshold->count = 0; 418 threshold->last = time_second; 419 } 420 421 void 422 pf_add_threshold(struct pf_threshold *threshold) 423 { 424 u_int32_t t = time_second, diff = t - threshold->last; 425 426 if (diff >= threshold->seconds) 427 threshold->count = 0; 428 else 429 threshold->count -= threshold->count * diff / 430 threshold->seconds; 431 threshold->count += PF_THRESHOLD_MULT; 432 threshold->last = t; 433 } 434 435 int 436 pf_check_threshold(struct pf_threshold *threshold) 437 { 438 return (threshold->count > threshold->limit); 439 } 440 441 int 442 pf_src_connlimit(struct pf_state **state) 443 { 444 int bad = 0; 445 446 (*state)->src_node->conn++; 447 (*state)->src.tcp_est = 1; 448 pf_add_threshold(&(*state)->src_node->conn_rate); 449 450 if ((*state)->rule.ptr->max_src_conn && 451 (*state)->rule.ptr->max_src_conn < 452 (*state)->src_node->conn) { 453 pf_status.lcounters[LCNT_SRCCONN]++; 454 bad++; 455 } 456 457 if ((*state)->rule.ptr->max_src_conn_rate.limit && 458 pf_check_threshold(&(*state)->src_node->conn_rate)) { 459 pf_status.lcounters[LCNT_SRCCONNRATE]++; 460 bad++; 461 } 462 463 if (!bad) 464 return (0); 465 466 if ((*state)->rule.ptr->overload_tbl) { 467 struct pfr_addr p; 468 u_int32_t killed = 0; 469 470 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++; 471 if (pf_status.debug >= PF_DEBUG_MISC) { 472 kprintf("pf_src_connlimit: blocking address "); 473 pf_print_host(&(*state)->src_node->addr, 0, 474 (*state)->key[PF_SK_WIRE]->af); 475 } 476 477 bzero(&p, sizeof(p)); 478 p.pfra_af = (*state)->key[PF_SK_WIRE]->af; 479 switch ((*state)->key[PF_SK_WIRE]->af) { 480 #ifdef INET 481 case AF_INET: 482 p.pfra_net = 32; 483 p.pfra_ip4addr = (*state)->src_node->addr.v4; 484 break; 485 #endif /* INET */ 486 #ifdef INET6 487 case AF_INET6: 488 p.pfra_net = 128; 489 p.pfra_ip6addr = (*state)->src_node->addr.v6; 490 break; 491 #endif /* INET6 */ 492 } 493 494 pfr_insert_kentry((*state)->rule.ptr->overload_tbl, 495 &p, time_second); 496 497 /* kill existing states if that's required. */ 498 if ((*state)->rule.ptr->flush) { 499 struct pf_state_key *sk; 500 struct pf_state *st; 501 502 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++; 503 RB_FOREACH(st, pf_state_tree_id, &tree_id) { 504 sk = st->key[PF_SK_WIRE]; 505 /* 506 * Kill states from this source. (Only those 507 * from the same rule if PF_FLUSH_GLOBAL is not 508 * set) 509 */ 510 if (sk->af == 511 (*state)->key[PF_SK_WIRE]->af && 512 (((*state)->direction == PF_OUT && 513 PF_AEQ(&(*state)->src_node->addr, 514 &sk->addr[0], sk->af)) || 515 ((*state)->direction == PF_IN && 516 PF_AEQ(&(*state)->src_node->addr, 517 &sk->addr[1], sk->af))) && 518 ((*state)->rule.ptr->flush & 519 PF_FLUSH_GLOBAL || 520 (*state)->rule.ptr == st->rule.ptr)) { 521 st->timeout = PFTM_PURGE; 522 st->src.state = st->dst.state = 523 TCPS_CLOSED; 524 killed++; 525 } 526 } 527 if (pf_status.debug >= PF_DEBUG_MISC) 528 kprintf(", %u states killed", killed); 529 } 530 if (pf_status.debug >= PF_DEBUG_MISC) 531 kprintf("\n"); 532 } 533 534 /* kill this state */ 535 (*state)->timeout = PFTM_PURGE; 536 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 537 return (1); 538 } 539 540 int 541 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, 542 struct pf_addr *src, sa_family_t af) 543 { 544 struct pf_src_node k; 545 546 if (*sn == NULL) { 547 k.af = af; 548 PF_ACPY(&k.addr, src, af); 549 if (rule->rule_flag & PFRULE_RULESRCTRACK || 550 rule->rpool.opts & PF_POOL_STICKYADDR) 551 k.rule.ptr = rule; 552 else 553 k.rule.ptr = NULL; 554 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; 555 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k); 556 } 557 if (*sn == NULL) { 558 if (!rule->max_src_nodes || 559 rule->src_nodes < rule->max_src_nodes) 560 (*sn) = kmalloc(sizeof(struct pf_src_node), M_PFSRCTREEPL, M_NOWAIT|M_ZERO); 561 else 562 pf_status.lcounters[LCNT_SRCNODES]++; 563 if ((*sn) == NULL) 564 return (-1); 565 566 pf_init_threshold(&(*sn)->conn_rate, 567 rule->max_src_conn_rate.limit, 568 rule->max_src_conn_rate.seconds); 569 570 (*sn)->af = af; 571 if (rule->rule_flag & PFRULE_RULESRCTRACK || 572 rule->rpool.opts & PF_POOL_STICKYADDR) 573 (*sn)->rule.ptr = rule; 574 else 575 (*sn)->rule.ptr = NULL; 576 PF_ACPY(&(*sn)->addr, src, af); 577 if (RB_INSERT(pf_src_tree, 578 &tree_src_tracking, *sn) != NULL) { 579 if (pf_status.debug >= PF_DEBUG_MISC) { 580 kprintf("pf: src_tree insert failed: "); 581 pf_print_host(&(*sn)->addr, 0, af); 582 kprintf("\n"); 583 } 584 kfree(*sn, M_PFSRCTREEPL); 585 return (-1); 586 } 587 (*sn)->creation = time_second; 588 (*sn)->ruletype = rule->action; 589 if ((*sn)->rule.ptr != NULL) 590 (*sn)->rule.ptr->src_nodes++; 591 pf_status.scounters[SCNT_SRC_NODE_INSERT]++; 592 pf_status.src_nodes++; 593 } else { 594 if (rule->max_src_states && 595 (*sn)->states >= rule->max_src_states) { 596 pf_status.lcounters[LCNT_SRCSTATES]++; 597 return (-1); 598 } 599 } 600 return (0); 601 } 602 603 /* state table stuff */ 604 605 static __inline int 606 pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b) 607 { 608 int diff; 609 610 if ((diff = a->proto - b->proto) != 0) 611 return (diff); 612 if ((diff = a->af - b->af) != 0) 613 return (diff); 614 switch (a->af) { 615 #ifdef INET 616 case AF_INET: 617 if (a->addr[0].addr32[0] > b->addr[0].addr32[0]) 618 return (1); 619 if (a->addr[0].addr32[0] < b->addr[0].addr32[0]) 620 return (-1); 621 if (a->addr[1].addr32[0] > b->addr[1].addr32[0]) 622 return (1); 623 if (a->addr[1].addr32[0] < b->addr[1].addr32[0]) 624 return (-1); 625 break; 626 #endif /* INET */ 627 #ifdef INET6 628 case AF_INET6: 629 if (a->addr[0].addr32[3] > b->addr[0].addr32[3]) 630 return (1); 631 if (a->addr[0].addr32[3] < b->addr[0].addr32[3]) 632 return (-1); 633 if (a->addr[1].addr32[3] > b->addr[1].addr32[3]) 634 return (1); 635 if (a->addr[1].addr32[3] < b->addr[1].addr32[3]) 636 return (-1); 637 if (a->addr[0].addr32[2] > b->addr[0].addr32[2]) 638 return (1); 639 if (a->addr[0].addr32[2] < b->addr[0].addr32[2]) 640 return (-1); 641 if (a->addr[1].addr32[2] > b->addr[1].addr32[2]) 642 return (1); 643 if (a->addr[1].addr32[2] < b->addr[1].addr32[2]) 644 return (-1); 645 if (a->addr[0].addr32[1] > b->addr[0].addr32[1]) 646 return (1); 647 if (a->addr[0].addr32[1] < b->addr[0].addr32[1]) 648 return (-1); 649 if (a->addr[1].addr32[1] > b->addr[1].addr32[1]) 650 return (1); 651 if (a->addr[1].addr32[1] < b->addr[1].addr32[1]) 652 return (-1); 653 if (a->addr[0].addr32[0] > b->addr[0].addr32[0]) 654 return (1); 655 if (a->addr[0].addr32[0] < b->addr[0].addr32[0]) 656 return (-1); 657 if (a->addr[1].addr32[0] > b->addr[1].addr32[0]) 658 return (1); 659 if (a->addr[1].addr32[0] < b->addr[1].addr32[0]) 660 return (-1); 661 break; 662 #endif /* INET6 */ 663 } 664 665 if ((diff = a->port[0] - b->port[0]) != 0) 666 return (diff); 667 if ((diff = a->port[1] - b->port[1]) != 0) 668 return (diff); 669 670 return (0); 671 } 672 673 static __inline int 674 pf_state_compare_id(struct pf_state *a, struct pf_state *b) 675 { 676 if (a->id > b->id) 677 return (1); 678 if (a->id < b->id) 679 return (-1); 680 if (a->creatorid > b->creatorid) 681 return (1); 682 if (a->creatorid < b->creatorid) 683 return (-1); 684 685 return (0); 686 } 687 688 int 689 pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx) 690 { 691 struct pf_state_item *si; 692 struct pf_state_key *cur; 693 694 KKASSERT(s->key[idx] == NULL); /* XXX handle this? */ 695 696 if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) { 697 /* key exists. check for same kif, if none, add to key */ 698 TAILQ_FOREACH(si, &cur->states, entry) 699 if (si->s->kif == s->kif && 700 si->s->direction == s->direction) { 701 if (pf_status.debug >= PF_DEBUG_MISC) { 702 kprintf( 703 "pf: %s key attach failed on %s: ", 704 (idx == PF_SK_WIRE) ? 705 "wire" : "stack", 706 s->kif->pfik_name); 707 pf_print_state_parts(s, 708 (idx == PF_SK_WIRE) ? sk : NULL, 709 (idx == PF_SK_STACK) ? sk : NULL); 710 kprintf("\n"); 711 } 712 kfree(sk, M_PFSTATEKEYPL); 713 return (-1); /* collision! */ 714 } 715 kfree(sk, M_PFSTATEKEYPL); 716 717 s->key[idx] = cur; 718 } else 719 s->key[idx] = sk; 720 721 if ((si = kmalloc(sizeof(struct pf_state_item), M_PFSTATEITEMPL, M_NOWAIT)) == NULL) { 722 pf_state_key_detach(s, idx); 723 return (-1); 724 } 725 si->s = s; 726 727 /* list is sorted, if-bound states before floating */ 728 if (s->kif == pfi_all) 729 TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry); 730 else 731 TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry); 732 return (0); 733 } 734 735 void 736 pf_detach_state(struct pf_state *s) 737 { 738 if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK]) 739 s->key[PF_SK_WIRE] = NULL; 740 741 if (s->key[PF_SK_STACK] != NULL) 742 pf_state_key_detach(s, PF_SK_STACK); 743 744 if (s->key[PF_SK_WIRE] != NULL) 745 pf_state_key_detach(s, PF_SK_WIRE); 746 } 747 748 void 749 pf_state_key_detach(struct pf_state *s, int idx) 750 { 751 struct pf_state_item *si; 752 si = TAILQ_FIRST(&s->key[idx]->states); 753 while (si && si->s != s) 754 si = TAILQ_NEXT(si, entry); 755 756 if (si) { 757 TAILQ_REMOVE(&s->key[idx]->states, si, entry); 758 kfree(si, M_PFSTATEITEMPL); 759 } 760 761 if (TAILQ_EMPTY(&s->key[idx]->states)) { 762 RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]); 763 if (s->key[idx]->reverse) 764 s->key[idx]->reverse->reverse = NULL; 765 if (s->key[idx]->inp) 766 s->key[idx]->inp->inp_pf_sk = NULL; 767 kfree(s->key[idx], M_PFSTATEKEYPL); 768 } 769 s->key[idx] = NULL; 770 } 771 772 struct pf_state_key * 773 pf_alloc_state_key(int pool_flags) 774 { 775 struct pf_state_key *sk; 776 777 if ((sk = kmalloc(sizeof(struct pf_state_key), M_PFSTATEKEYPL, pool_flags)) == NULL) 778 return (NULL); 779 TAILQ_INIT(&sk->states); 780 781 return (sk); 782 } 783 784 int 785 pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr, 786 struct pf_state_key **skw, struct pf_state_key **sks, 787 struct pf_state_key **skp, struct pf_state_key **nkp, 788 struct pf_addr *saddr, struct pf_addr *daddr, 789 u_int16_t sport, u_int16_t dport) 790 { 791 KKASSERT((*skp == NULL && *nkp == NULL)); 792 793 if ((*skp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL) 794 return (ENOMEM); 795 796 PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af); 797 PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af); 798 (*skp)->port[pd->sidx] = sport; 799 (*skp)->port[pd->didx] = dport; 800 (*skp)->proto = pd->proto; 801 (*skp)->af = pd->af; 802 803 if (nr != NULL) { 804 if ((*nkp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL) 805 return (ENOMEM); /* caller must handle cleanup */ 806 807 /* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */ 808 PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af); 809 PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af); 810 (*nkp)->port[0] = (*skp)->port[0]; 811 (*nkp)->port[1] = (*skp)->port[1]; 812 (*nkp)->proto = pd->proto; 813 (*nkp)->af = pd->af; 814 } else 815 *nkp = *skp; 816 817 if (pd->dir == PF_IN) { 818 *skw = *skp; 819 *sks = *nkp; 820 } else { 821 *sks = *skp; 822 *skw = *nkp; 823 } 824 return (0); 825 } 826 827 828 int 829 pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw, 830 struct pf_state_key *sks, struct pf_state *s) 831 { 832 s->kif = kif; 833 834 if (skw == sks) { 835 if (pf_state_key_attach(skw, s, PF_SK_WIRE)) 836 return (-1); 837 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE]; 838 } else { 839 if (pf_state_key_attach(skw, s, PF_SK_WIRE)) { 840 kfree(sks, M_PFSTATEKEYPL); 841 return (-1); 842 } 843 if (pf_state_key_attach(sks, s, PF_SK_STACK)) { 844 pf_state_key_detach(s, PF_SK_WIRE); 845 return (-1); 846 } 847 } 848 849 if (s->id == 0 && s->creatorid == 0) { 850 s->id = htobe64(pf_status.stateid++); 851 s->creatorid = pf_status.hostid; 852 } 853 854 /* 855 * Calculate hash code for altq 856 */ 857 s->hash = crc32(s->key[PF_SK_WIRE], sizeof(*sks)); 858 859 if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) { 860 if (pf_status.debug >= PF_DEBUG_MISC) { 861 kprintf("pf: state insert failed: " 862 "id: %016jx creatorid: %08x", 863 (uintmax_t)be64toh(s->id), ntohl(s->creatorid)); 864 if (s->sync_flags & PFSTATE_FROMSYNC) 865 kprintf(" (from sync)"); 866 kprintf("\n"); 867 } 868 pf_detach_state(s); 869 return (-1); 870 } 871 TAILQ_INSERT_TAIL(&state_list, s, entry_list); 872 pf_status.fcounters[FCNT_STATE_INSERT]++; 873 pf_status.states++; 874 pfi_kif_ref(kif, PFI_KIF_REF_STATE); 875 pfsync_insert_state(s); 876 return (0); 877 } 878 879 struct pf_state * 880 pf_find_state_byid(struct pf_state_cmp *key) 881 { 882 pf_status.fcounters[FCNT_STATE_SEARCH]++; 883 884 return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key)); 885 } 886 887 struct pf_state * 888 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir, 889 struct mbuf *m) 890 { 891 struct pf_state_key *sk; 892 struct pf_state_item *si; 893 894 pf_status.fcounters[FCNT_STATE_SEARCH]++; 895 896 if (dir == PF_OUT && m->m_pkthdr.pf.statekey && 897 ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse) 898 sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse; 899 else { 900 if ((sk = RB_FIND(pf_state_tree, &pf_statetbl, 901 (struct pf_state_key *)key)) == NULL) 902 return (NULL); 903 if (dir == PF_OUT && m->m_pkthdr.pf.statekey) { 904 ((struct pf_state_key *) 905 m->m_pkthdr.pf.statekey)->reverse = sk; 906 sk->reverse = m->m_pkthdr.pf.statekey; 907 } 908 } 909 910 if (dir == PF_OUT) 911 m->m_pkthdr.pf.statekey = NULL; 912 913 /* list is sorted, if-bound states before floating ones */ 914 TAILQ_FOREACH(si, &sk->states, entry) 915 if ((si->s->kif == pfi_all || si->s->kif == kif) && 916 sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] : 917 si->s->key[PF_SK_STACK])) 918 return (si->s); 919 920 return (NULL); 921 } 922 923 struct pf_state * 924 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more) 925 { 926 struct pf_state_key *sk; 927 struct pf_state_item *si, *ret = NULL; 928 929 pf_status.fcounters[FCNT_STATE_SEARCH]++; 930 931 sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key); 932 933 if (sk != NULL) { 934 TAILQ_FOREACH(si, &sk->states, entry) 935 if (dir == PF_INOUT || 936 (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] : 937 si->s->key[PF_SK_STACK]))) { 938 if (more == NULL) 939 return (si->s); 940 941 if (ret) 942 (*more)++; 943 else 944 ret = si; 945 } 946 } 947 return (ret ? ret->s : NULL); 948 } 949 950 /* END state table stuff */ 951 952 953 void 954 pf_purge_thread(void *v) 955 { 956 int nloops = 0; 957 int locked = 0; 958 959 lwkt_gettoken(&pf_token); 960 for (;;) { 961 tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz); 962 963 lockmgr(&pf_consistency_lock, LK_EXCLUSIVE); 964 965 if (pf_end_threads) { 966 pf_purge_expired_states(pf_status.states, 1); 967 pf_purge_expired_fragments(); 968 pf_purge_expired_src_nodes(1); 969 pf_end_threads++; 970 971 lockmgr(&pf_consistency_lock, LK_RELEASE); 972 wakeup(pf_purge_thread); 973 kthread_exit(); 974 } 975 crit_enter(); 976 977 /* process a fraction of the state table every second */ 978 if(!pf_purge_expired_states(1 + (pf_status.states 979 / pf_default_rule.timeout[PFTM_INTERVAL]), 0)) { 980 981 pf_purge_expired_states(1 + (pf_status.states 982 / pf_default_rule.timeout[PFTM_INTERVAL]), 1); 983 } 984 985 /* purge other expired types every PFTM_INTERVAL seconds */ 986 if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) { 987 pf_purge_expired_fragments(); 988 if (!pf_purge_expired_src_nodes(locked)) { 989 pf_purge_expired_src_nodes(1); 990 } 991 nloops = 0; 992 } 993 crit_exit(); 994 lockmgr(&pf_consistency_lock, LK_RELEASE); 995 } 996 lwkt_reltoken(&pf_token); 997 } 998 999 u_int32_t 1000 pf_state_expires(const struct pf_state *state) 1001 { 1002 u_int32_t timeout; 1003 u_int32_t start; 1004 u_int32_t end; 1005 u_int32_t states; 1006 1007 /* handle all PFTM_* > PFTM_MAX here */ 1008 if (state->timeout == PFTM_PURGE) 1009 return (time_second); 1010 if (state->timeout == PFTM_UNTIL_PACKET) 1011 return (0); 1012 KKASSERT(state->timeout != PFTM_UNLINKED); 1013 KKASSERT(state->timeout < PFTM_MAX); 1014 timeout = state->rule.ptr->timeout[state->timeout]; 1015 if (!timeout) 1016 timeout = pf_default_rule.timeout[state->timeout]; 1017 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START]; 1018 if (start) { 1019 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END]; 1020 states = state->rule.ptr->states_cur; 1021 } else { 1022 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START]; 1023 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END]; 1024 states = pf_status.states; 1025 } 1026 if (end && states > start && start < end) { 1027 if (states < end) 1028 return (state->expire + timeout * (end - states) / 1029 (end - start)); 1030 else 1031 return (time_second); 1032 } 1033 return (state->expire + timeout); 1034 } 1035 1036 int 1037 pf_purge_expired_src_nodes(int waslocked) 1038 { 1039 struct pf_src_node *cur, *next; 1040 int locked = waslocked; 1041 1042 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) { 1043 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur); 1044 1045 if (cur->states <= 0 && cur->expire <= time_second) { 1046 if (! locked) { 1047 lockmgr(&pf_consistency_lock, LK_EXCLUSIVE); 1048 next = RB_NEXT(pf_src_tree, 1049 &tree_src_tracking, cur); 1050 locked = 1; 1051 } 1052 if (cur->rule.ptr != NULL) { 1053 cur->rule.ptr->src_nodes--; 1054 if (cur->rule.ptr->states_cur <= 0 && 1055 cur->rule.ptr->max_src_nodes <= 0) 1056 pf_rm_rule(NULL, cur->rule.ptr); 1057 } 1058 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur); 1059 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 1060 pf_status.src_nodes--; 1061 kfree(cur, M_PFSRCTREEPL); 1062 } 1063 } 1064 1065 if (locked && !waslocked) 1066 lockmgr(&pf_consistency_lock, LK_RELEASE); 1067 return(1); 1068 } 1069 1070 void 1071 pf_src_tree_remove_state(struct pf_state *s) 1072 { 1073 u_int32_t timeout; 1074 1075 if (s->src_node != NULL) { 1076 if (s->src.tcp_est) 1077 --s->src_node->conn; 1078 if (--s->src_node->states <= 0) { 1079 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE]; 1080 if (!timeout) 1081 timeout = 1082 pf_default_rule.timeout[PFTM_SRC_NODE]; 1083 s->src_node->expire = time_second + timeout; 1084 } 1085 } 1086 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) { 1087 if (--s->nat_src_node->states <= 0) { 1088 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE]; 1089 if (!timeout) 1090 timeout = 1091 pf_default_rule.timeout[PFTM_SRC_NODE]; 1092 s->nat_src_node->expire = time_second + timeout; 1093 } 1094 } 1095 s->src_node = s->nat_src_node = NULL; 1096 } 1097 1098 /* callers should be at crit_enter() */ 1099 void 1100 pf_unlink_state(struct pf_state *cur) 1101 { 1102 if (cur->src.state == PF_TCPS_PROXY_DST) { 1103 /* XXX wire key the right one? */ 1104 pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af, 1105 &cur->key[PF_SK_WIRE]->addr[1], 1106 &cur->key[PF_SK_WIRE]->addr[0], 1107 cur->key[PF_SK_WIRE]->port[1], 1108 cur->key[PF_SK_WIRE]->port[0], 1109 cur->src.seqhi, cur->src.seqlo + 1, 1110 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL); 1111 } 1112 RB_REMOVE(pf_state_tree_id, &tree_id, cur); 1113 if (cur->creatorid == pf_status.hostid) 1114 pfsync_delete_state(cur); 1115 cur->timeout = PFTM_UNLINKED; 1116 pf_src_tree_remove_state(cur); 1117 pf_detach_state(cur); 1118 } 1119 1120 static struct pf_state *purge_cur; 1121 1122 /* callers should be at crit_enter() and hold the 1123 * write_lock on pf_consistency_lock */ 1124 void 1125 pf_free_state(struct pf_state *cur) 1126 { 1127 if (pfsyncif != NULL && 1128 (pfsyncif->sc_bulk_send_next == cur || 1129 pfsyncif->sc_bulk_terminator == cur)) 1130 return; 1131 KKASSERT(cur->timeout == PFTM_UNLINKED); 1132 if (--cur->rule.ptr->states_cur <= 0 && 1133 cur->rule.ptr->src_nodes <= 0) 1134 pf_rm_rule(NULL, cur->rule.ptr); 1135 if (cur->nat_rule.ptr != NULL) 1136 if (--cur->nat_rule.ptr->states_cur <= 0 && 1137 cur->nat_rule.ptr->src_nodes <= 0) 1138 pf_rm_rule(NULL, cur->nat_rule.ptr); 1139 if (cur->anchor.ptr != NULL) 1140 if (--cur->anchor.ptr->states_cur <= 0) 1141 pf_rm_rule(NULL, cur->anchor.ptr); 1142 pf_normalize_tcp_cleanup(cur); 1143 pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE); 1144 1145 /* 1146 * We may be freeing pf_purge_expired_states()'s saved scan entry, 1147 * adjust it if necessary. 1148 */ 1149 if (purge_cur == cur) { 1150 kprintf("PURGE CONFLICT\n"); 1151 purge_cur = TAILQ_NEXT(purge_cur, entry_list); 1152 } 1153 TAILQ_REMOVE(&state_list, cur, entry_list); 1154 if (cur->tag) 1155 pf_tag_unref(cur->tag); 1156 kfree(cur, M_PFSTATEPL); 1157 pf_status.fcounters[FCNT_STATE_REMOVALS]++; 1158 pf_status.states--; 1159 } 1160 1161 int 1162 pf_purge_expired_states(u_int32_t maxcheck, int waslocked) 1163 { 1164 struct pf_state *cur; 1165 int locked = waslocked; 1166 1167 while (maxcheck--) { 1168 /* 1169 * Wrap to start of list when we hit the end 1170 */ 1171 cur = purge_cur; 1172 if (cur == NULL) { 1173 cur = TAILQ_FIRST(&state_list); 1174 if (cur == NULL) 1175 break; /* list empty */ 1176 } 1177 1178 /* 1179 * Setup next (purge_cur) while we process this one. If we block and 1180 * something else deletes purge_cur, pf_free_state() will adjust it further 1181 * ahead. 1182 */ 1183 purge_cur = TAILQ_NEXT(cur, entry_list); 1184 1185 if (cur->timeout == PFTM_UNLINKED) { 1186 /* free unlinked state */ 1187 if (! locked) { 1188 lockmgr(&pf_consistency_lock, LK_EXCLUSIVE); 1189 locked = 1; 1190 } 1191 pf_free_state(cur); 1192 } else if (pf_state_expires(cur) <= time_second) { 1193 /* unlink and free expired state */ 1194 pf_unlink_state(cur); 1195 if (! locked) { 1196 if (!lockmgr(&pf_consistency_lock, LK_EXCLUSIVE)) 1197 return (0); 1198 locked = 1; 1199 } 1200 pf_free_state(cur); 1201 } 1202 } 1203 1204 if (locked) 1205 lockmgr(&pf_consistency_lock, LK_RELEASE); 1206 return (1); 1207 } 1208 1209 int 1210 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw) 1211 { 1212 if (aw->type != PF_ADDR_TABLE) 1213 return (0); 1214 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL) 1215 return (1); 1216 return (0); 1217 } 1218 1219 void 1220 pf_tbladdr_remove(struct pf_addr_wrap *aw) 1221 { 1222 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL) 1223 return; 1224 pfr_detach_table(aw->p.tbl); 1225 aw->p.tbl = NULL; 1226 } 1227 1228 void 1229 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 1230 { 1231 struct pfr_ktable *kt = aw->p.tbl; 1232 1233 if (aw->type != PF_ADDR_TABLE || kt == NULL) 1234 return; 1235 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 1236 kt = kt->pfrkt_root; 1237 aw->p.tbl = NULL; 1238 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 1239 kt->pfrkt_cnt : -1; 1240 } 1241 1242 void 1243 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af) 1244 { 1245 switch (af) { 1246 #ifdef INET 1247 case AF_INET: { 1248 u_int32_t a = ntohl(addr->addr32[0]); 1249 kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255, 1250 (a>>8)&255, a&255); 1251 if (p) { 1252 p = ntohs(p); 1253 kprintf(":%u", p); 1254 } 1255 break; 1256 } 1257 #endif /* INET */ 1258 #ifdef INET6 1259 case AF_INET6: { 1260 u_int16_t b; 1261 u_int8_t i, curstart = 255, curend = 0, 1262 maxstart = 0, maxend = 0; 1263 for (i = 0; i < 8; i++) { 1264 if (!addr->addr16[i]) { 1265 if (curstart == 255) 1266 curstart = i; 1267 else 1268 curend = i; 1269 } else { 1270 if (curstart) { 1271 if ((curend - curstart) > 1272 (maxend - maxstart)) { 1273 maxstart = curstart; 1274 maxend = curend; 1275 curstart = 255; 1276 } 1277 } 1278 } 1279 } 1280 for (i = 0; i < 8; i++) { 1281 if (i >= maxstart && i <= maxend) { 1282 if (maxend != 7) { 1283 if (i == maxstart) 1284 kprintf(":"); 1285 } else { 1286 if (i == maxend) 1287 kprintf(":"); 1288 } 1289 } else { 1290 b = ntohs(addr->addr16[i]); 1291 kprintf("%x", b); 1292 if (i < 7) 1293 kprintf(":"); 1294 } 1295 } 1296 if (p) { 1297 p = ntohs(p); 1298 kprintf("[%u]", p); 1299 } 1300 break; 1301 } 1302 #endif /* INET6 */ 1303 } 1304 } 1305 1306 void 1307 pf_print_state(struct pf_state *s) 1308 { 1309 pf_print_state_parts(s, NULL, NULL); 1310 } 1311 1312 void 1313 pf_print_state_parts(struct pf_state *s, 1314 struct pf_state_key *skwp, struct pf_state_key *sksp) 1315 { 1316 struct pf_state_key *skw, *sks; 1317 u_int8_t proto, dir; 1318 1319 /* Do our best to fill these, but they're skipped if NULL */ 1320 skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL); 1321 sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL); 1322 proto = skw ? skw->proto : (sks ? sks->proto : 0); 1323 dir = s ? s->direction : 0; 1324 1325 switch (proto) { 1326 case IPPROTO_TCP: 1327 kprintf("TCP "); 1328 break; 1329 case IPPROTO_UDP: 1330 kprintf("UDP "); 1331 break; 1332 case IPPROTO_ICMP: 1333 kprintf("ICMP "); 1334 break; 1335 case IPPROTO_ICMPV6: 1336 kprintf("ICMPV6 "); 1337 break; 1338 default: 1339 kprintf("%u ", skw->proto); 1340 break; 1341 } 1342 switch (dir) { 1343 case PF_IN: 1344 kprintf(" in"); 1345 break; 1346 case PF_OUT: 1347 kprintf(" out"); 1348 break; 1349 } 1350 if (skw) { 1351 kprintf(" wire: "); 1352 pf_print_host(&skw->addr[0], skw->port[0], skw->af); 1353 kprintf(" "); 1354 pf_print_host(&skw->addr[1], skw->port[1], skw->af); 1355 } 1356 if (sks) { 1357 kprintf(" stack: "); 1358 if (sks != skw) { 1359 pf_print_host(&sks->addr[0], sks->port[0], sks->af); 1360 kprintf(" "); 1361 pf_print_host(&sks->addr[1], sks->port[1], sks->af); 1362 } else 1363 kprintf("-"); 1364 } 1365 if (s) { 1366 if (proto == IPPROTO_TCP) { 1367 kprintf(" [lo=%u high=%u win=%u modulator=%u", 1368 s->src.seqlo, s->src.seqhi, 1369 s->src.max_win, s->src.seqdiff); 1370 if (s->src.wscale && s->dst.wscale) 1371 kprintf(" wscale=%u", 1372 s->src.wscale & PF_WSCALE_MASK); 1373 kprintf("]"); 1374 kprintf(" [lo=%u high=%u win=%u modulator=%u", 1375 s->dst.seqlo, s->dst.seqhi, 1376 s->dst.max_win, s->dst.seqdiff); 1377 if (s->src.wscale && s->dst.wscale) 1378 kprintf(" wscale=%u", 1379 s->dst.wscale & PF_WSCALE_MASK); 1380 kprintf("]"); 1381 } 1382 kprintf(" %u:%u", s->src.state, s->dst.state); 1383 } 1384 } 1385 1386 void 1387 pf_print_flags(u_int8_t f) 1388 { 1389 if (f) 1390 kprintf(" "); 1391 if (f & TH_FIN) 1392 kprintf("F"); 1393 if (f & TH_SYN) 1394 kprintf("S"); 1395 if (f & TH_RST) 1396 kprintf("R"); 1397 if (f & TH_PUSH) 1398 kprintf("P"); 1399 if (f & TH_ACK) 1400 kprintf("A"); 1401 if (f & TH_URG) 1402 kprintf("U"); 1403 if (f & TH_ECE) 1404 kprintf("E"); 1405 if (f & TH_CWR) 1406 kprintf("W"); 1407 } 1408 1409 #define PF_SET_SKIP_STEPS(i) \ 1410 do { \ 1411 while (head[i] != cur) { \ 1412 head[i]->skip[i].ptr = cur; \ 1413 head[i] = TAILQ_NEXT(head[i], entries); \ 1414 } \ 1415 } while (0) 1416 1417 void 1418 pf_calc_skip_steps(struct pf_rulequeue *rules) 1419 { 1420 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT]; 1421 int i; 1422 1423 cur = TAILQ_FIRST(rules); 1424 prev = cur; 1425 for (i = 0; i < PF_SKIP_COUNT; ++i) 1426 head[i] = cur; 1427 while (cur != NULL) { 1428 1429 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) 1430 PF_SET_SKIP_STEPS(PF_SKIP_IFP); 1431 if (cur->direction != prev->direction) 1432 PF_SET_SKIP_STEPS(PF_SKIP_DIR); 1433 if (cur->af != prev->af) 1434 PF_SET_SKIP_STEPS(PF_SKIP_AF); 1435 if (cur->proto != prev->proto) 1436 PF_SET_SKIP_STEPS(PF_SKIP_PROTO); 1437 if (cur->src.neg != prev->src.neg || 1438 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr)) 1439 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR); 1440 if (cur->src.port[0] != prev->src.port[0] || 1441 cur->src.port[1] != prev->src.port[1] || 1442 cur->src.port_op != prev->src.port_op) 1443 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT); 1444 if (cur->dst.neg != prev->dst.neg || 1445 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr)) 1446 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR); 1447 if (cur->dst.port[0] != prev->dst.port[0] || 1448 cur->dst.port[1] != prev->dst.port[1] || 1449 cur->dst.port_op != prev->dst.port_op) 1450 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT); 1451 1452 prev = cur; 1453 cur = TAILQ_NEXT(cur, entries); 1454 } 1455 for (i = 0; i < PF_SKIP_COUNT; ++i) 1456 PF_SET_SKIP_STEPS(i); 1457 } 1458 1459 int 1460 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2) 1461 { 1462 if (aw1->type != aw2->type) 1463 return (1); 1464 switch (aw1->type) { 1465 case PF_ADDR_ADDRMASK: 1466 case PF_ADDR_RANGE: 1467 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0)) 1468 return (1); 1469 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0)) 1470 return (1); 1471 return (0); 1472 case PF_ADDR_DYNIFTL: 1473 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt); 1474 case PF_ADDR_NOROUTE: 1475 case PF_ADDR_URPFFAILED: 1476 return (0); 1477 case PF_ADDR_TABLE: 1478 return (aw1->p.tbl != aw2->p.tbl); 1479 case PF_ADDR_RTLABEL: 1480 return (aw1->v.rtlabel != aw2->v.rtlabel); 1481 default: 1482 kprintf("invalid address type: %d\n", aw1->type); 1483 return (1); 1484 } 1485 } 1486 1487 u_int16_t 1488 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp) 1489 { 1490 u_int32_t l; 1491 1492 if (udp && !cksum) 1493 return (0x0000); 1494 l = cksum + old - new; 1495 l = (l >> 16) + (l & 65535); 1496 l = l & 65535; 1497 if (udp && !l) 1498 return (0xFFFF); 1499 return (l); 1500 } 1501 1502 void 1503 pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc, 1504 struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af) 1505 { 1506 struct pf_addr ao; 1507 u_int16_t po = *p; 1508 1509 PF_ACPY(&ao, a, af); 1510 PF_ACPY(a, an, af); 1511 1512 *p = pn; 1513 1514 switch (af) { 1515 #ifdef INET 1516 case AF_INET: 1517 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, 1518 ao.addr16[0], an->addr16[0], 0), 1519 ao.addr16[1], an->addr16[1], 0); 1520 *p = pn; 1521 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, 1522 ao.addr16[0], an->addr16[0], u), 1523 ao.addr16[1], an->addr16[1], u), 1524 po, pn, u); 1525 break; 1526 #endif /* INET */ 1527 #ifdef INET6 1528 case AF_INET6: 1529 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1530 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1531 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, 1532 ao.addr16[0], an->addr16[0], u), 1533 ao.addr16[1], an->addr16[1], u), 1534 ao.addr16[2], an->addr16[2], u), 1535 ao.addr16[3], an->addr16[3], u), 1536 ao.addr16[4], an->addr16[4], u), 1537 ao.addr16[5], an->addr16[5], u), 1538 ao.addr16[6], an->addr16[6], u), 1539 ao.addr16[7], an->addr16[7], u), 1540 po, pn, u); 1541 break; 1542 #endif /* INET6 */ 1543 } 1544 } 1545 1546 1547 /* Changes a u_int32_t. Uses a void * so there are no align restrictions */ 1548 void 1549 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u) 1550 { 1551 u_int32_t ao; 1552 1553 memcpy(&ao, a, sizeof(ao)); 1554 memcpy(a, &an, sizeof(u_int32_t)); 1555 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u), 1556 ao % 65536, an % 65536, u); 1557 } 1558 1559 #ifdef INET6 1560 void 1561 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u) 1562 { 1563 struct pf_addr ao; 1564 1565 PF_ACPY(&ao, a, AF_INET6); 1566 PF_ACPY(a, an, AF_INET6); 1567 1568 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1569 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1570 pf_cksum_fixup(pf_cksum_fixup(*c, 1571 ao.addr16[0], an->addr16[0], u), 1572 ao.addr16[1], an->addr16[1], u), 1573 ao.addr16[2], an->addr16[2], u), 1574 ao.addr16[3], an->addr16[3], u), 1575 ao.addr16[4], an->addr16[4], u), 1576 ao.addr16[5], an->addr16[5], u), 1577 ao.addr16[6], an->addr16[6], u), 1578 ao.addr16[7], an->addr16[7], u); 1579 } 1580 #endif /* INET6 */ 1581 1582 void 1583 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, 1584 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c, 1585 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af) 1586 { 1587 struct pf_addr oia, ooa; 1588 1589 PF_ACPY(&oia, ia, af); 1590 if (oa) 1591 PF_ACPY(&ooa, oa, af); 1592 1593 /* Change inner protocol port, fix inner protocol checksum. */ 1594 if (ip != NULL) { 1595 u_int16_t oip = *ip; 1596 u_int32_t opc = 0; 1597 1598 if (pc != NULL) 1599 opc = *pc; 1600 *ip = np; 1601 if (pc != NULL) 1602 *pc = pf_cksum_fixup(*pc, oip, *ip, u); 1603 *ic = pf_cksum_fixup(*ic, oip, *ip, 0); 1604 if (pc != NULL) 1605 *ic = pf_cksum_fixup(*ic, opc, *pc, 0); 1606 } 1607 /* Change inner ip address, fix inner ip and icmp checksums. */ 1608 PF_ACPY(ia, na, af); 1609 switch (af) { 1610 #ifdef INET 1611 case AF_INET: { 1612 u_int32_t oh2c = *h2c; 1613 1614 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c, 1615 oia.addr16[0], ia->addr16[0], 0), 1616 oia.addr16[1], ia->addr16[1], 0); 1617 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, 1618 oia.addr16[0], ia->addr16[0], 0), 1619 oia.addr16[1], ia->addr16[1], 0); 1620 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0); 1621 break; 1622 } 1623 #endif /* INET */ 1624 #ifdef INET6 1625 case AF_INET6: 1626 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1627 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1628 pf_cksum_fixup(pf_cksum_fixup(*ic, 1629 oia.addr16[0], ia->addr16[0], u), 1630 oia.addr16[1], ia->addr16[1], u), 1631 oia.addr16[2], ia->addr16[2], u), 1632 oia.addr16[3], ia->addr16[3], u), 1633 oia.addr16[4], ia->addr16[4], u), 1634 oia.addr16[5], ia->addr16[5], u), 1635 oia.addr16[6], ia->addr16[6], u), 1636 oia.addr16[7], ia->addr16[7], u); 1637 break; 1638 #endif /* INET6 */ 1639 } 1640 /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */ 1641 if (oa) { 1642 PF_ACPY(oa, na, af); 1643 switch (af) { 1644 #ifdef INET 1645 case AF_INET: 1646 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc, 1647 ooa.addr16[0], oa->addr16[0], 0), 1648 ooa.addr16[1], oa->addr16[1], 0); 1649 break; 1650 #endif /* INET */ 1651 #ifdef INET6 1652 case AF_INET6: 1653 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1654 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1655 pf_cksum_fixup(pf_cksum_fixup(*ic, 1656 ooa.addr16[0], oa->addr16[0], u), 1657 ooa.addr16[1], oa->addr16[1], u), 1658 ooa.addr16[2], oa->addr16[2], u), 1659 ooa.addr16[3], oa->addr16[3], u), 1660 ooa.addr16[4], oa->addr16[4], u), 1661 ooa.addr16[5], oa->addr16[5], u), 1662 ooa.addr16[6], oa->addr16[6], u), 1663 ooa.addr16[7], oa->addr16[7], u); 1664 break; 1665 #endif /* INET6 */ 1666 } 1667 } 1668 } 1669 1670 1671 /* 1672 * Need to modulate the sequence numbers in the TCP SACK option 1673 * (credits to Krzysztof Pfaff for report and patch) 1674 */ 1675 int 1676 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd, 1677 struct tcphdr *th, struct pf_state_peer *dst) 1678 { 1679 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen; 1680 u_int8_t opts[TCP_MAXOLEN], *opt = opts; 1681 int copyback = 0, i, olen; 1682 struct raw_sackblock sack; 1683 1684 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2) 1685 if (hlen < TCPOLEN_SACKLEN || 1686 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af)) 1687 return 0; 1688 1689 while (hlen >= TCPOLEN_SACKLEN) { 1690 olen = opt[1]; 1691 switch (*opt) { 1692 case TCPOPT_EOL: /* FALLTHROUGH */ 1693 case TCPOPT_NOP: 1694 opt++; 1695 hlen--; 1696 break; 1697 case TCPOPT_SACK: 1698 if (olen > hlen) 1699 olen = hlen; 1700 if (olen >= TCPOLEN_SACKLEN) { 1701 for (i = 2; i + TCPOLEN_SACK <= olen; 1702 i += TCPOLEN_SACK) { 1703 memcpy(&sack, &opt[i], sizeof(sack)); 1704 pf_change_a(&sack.rblk_start, &th->th_sum, 1705 htonl(ntohl(sack.rblk_start) - 1706 dst->seqdiff), 0); 1707 pf_change_a(&sack.rblk_end, &th->th_sum, 1708 htonl(ntohl(sack.rblk_end) - 1709 dst->seqdiff), 0); 1710 memcpy(&opt[i], &sack, sizeof(sack)); 1711 } 1712 copyback = 1; 1713 } 1714 /* FALLTHROUGH */ 1715 default: 1716 if (olen < 2) 1717 olen = 2; 1718 hlen -= olen; 1719 opt += olen; 1720 } 1721 } 1722 1723 if (copyback) 1724 m_copyback(m, off + sizeof(*th), thoptlen, opts); 1725 return (copyback); 1726 } 1727 1728 void 1729 pf_send_tcp(const struct pf_rule *r, sa_family_t af, 1730 const struct pf_addr *saddr, const struct pf_addr *daddr, 1731 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, 1732 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag, 1733 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp) 1734 { 1735 struct mbuf *m; 1736 int len = 0, tlen; 1737 #ifdef INET 1738 struct ip *h = NULL; 1739 #endif /* INET */ 1740 #ifdef INET6 1741 struct ip6_hdr *h6 = NULL; 1742 #endif /* INET6 */ 1743 struct tcphdr *th = NULL; 1744 char *opt; 1745 1746 ASSERT_LWKT_TOKEN_HELD(&pf_token); 1747 1748 /* maximum segment size tcp option */ 1749 tlen = sizeof(struct tcphdr); 1750 if (mss) 1751 tlen += 4; 1752 1753 switch (af) { 1754 #ifdef INET 1755 case AF_INET: 1756 len = sizeof(struct ip) + tlen; 1757 break; 1758 #endif /* INET */ 1759 #ifdef INET6 1760 case AF_INET6: 1761 len = sizeof(struct ip6_hdr) + tlen; 1762 break; 1763 #endif /* INET6 */ 1764 } 1765 1766 /* 1767 * Create outgoing mbuf. 1768 * 1769 * DragonFly doesn't zero the auxillary pkghdr fields, only fw_flags, 1770 * so make sure pf.flags is clear. 1771 */ 1772 m = m_gethdr(MB_DONTWAIT, MT_HEADER); 1773 if (m == NULL) { 1774 return; 1775 } 1776 if (tag) 1777 m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 1778 m->m_pkthdr.pf.flags = 0; 1779 m->m_pkthdr.pf.tag = rtag; 1780 /* XXX Recheck when upgrading to > 4.4 */ 1781 m->m_pkthdr.pf.statekey = NULL; 1782 if (r != NULL && r->rtableid >= 0) 1783 m->m_pkthdr.pf.rtableid = r->rtableid; 1784 1785 #ifdef ALTQ 1786 if (r != NULL && r->qid) { 1787 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 1788 m->m_pkthdr.pf.qid = r->qid; 1789 m->m_pkthdr.pf.ecn_af = af; 1790 m->m_pkthdr.pf.hdr = mtod(m, struct ip *); 1791 } 1792 #endif /* ALTQ */ 1793 m->m_data += max_linkhdr; 1794 m->m_pkthdr.len = m->m_len = len; 1795 m->m_pkthdr.rcvif = NULL; 1796 bzero(m->m_data, len); 1797 switch (af) { 1798 #ifdef INET 1799 case AF_INET: 1800 h = mtod(m, struct ip *); 1801 1802 /* IP header fields included in the TCP checksum */ 1803 h->ip_p = IPPROTO_TCP; 1804 h->ip_len = tlen; 1805 h->ip_src.s_addr = saddr->v4.s_addr; 1806 h->ip_dst.s_addr = daddr->v4.s_addr; 1807 1808 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip)); 1809 break; 1810 #endif /* INET */ 1811 #ifdef INET6 1812 case AF_INET6: 1813 h6 = mtod(m, struct ip6_hdr *); 1814 1815 /* IP header fields included in the TCP checksum */ 1816 h6->ip6_nxt = IPPROTO_TCP; 1817 h6->ip6_plen = htons(tlen); 1818 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr)); 1819 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr)); 1820 1821 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr)); 1822 break; 1823 #endif /* INET6 */ 1824 } 1825 1826 /* TCP header */ 1827 th->th_sport = sport; 1828 th->th_dport = dport; 1829 th->th_seq = htonl(seq); 1830 th->th_ack = htonl(ack); 1831 th->th_off = tlen >> 2; 1832 th->th_flags = flags; 1833 th->th_win = htons(win); 1834 1835 if (mss) { 1836 opt = (char *)(th + 1); 1837 opt[0] = TCPOPT_MAXSEG; 1838 opt[1] = 4; 1839 mss = htons(mss); 1840 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2); 1841 } 1842 1843 switch (af) { 1844 #ifdef INET 1845 case AF_INET: 1846 /* TCP checksum */ 1847 th->th_sum = in_cksum(m, len); 1848 1849 /* Finish the IP header */ 1850 h->ip_v = 4; 1851 h->ip_hl = sizeof(*h) >> 2; 1852 h->ip_tos = IPTOS_LOWDELAY; 1853 h->ip_len = len; 1854 h->ip_off = path_mtu_discovery ? IP_DF : 0; 1855 h->ip_ttl = ttl ? ttl : ip_defttl; 1856 h->ip_sum = 0; 1857 if (eh == NULL) { 1858 lwkt_reltoken(&pf_token); 1859 ip_output(m, NULL, NULL, 0, NULL, NULL); 1860 lwkt_gettoken(&pf_token); 1861 } else { 1862 struct route ro; 1863 struct rtentry rt; 1864 struct ether_header *e = (void *)ro.ro_dst.sa_data; 1865 1866 if (ifp == NULL) { 1867 m_freem(m); 1868 return; 1869 } 1870 rt.rt_ifp = ifp; 1871 ro.ro_rt = &rt; 1872 ro.ro_dst.sa_len = sizeof(ro.ro_dst); 1873 ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT; 1874 bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN); 1875 bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN); 1876 e->ether_type = eh->ether_type; 1877 /* XXX_IMPORT: later */ 1878 lwkt_reltoken(&pf_token); 1879 ip_output(m, (void *)NULL, &ro, 0, 1880 (void *)NULL, (void *)NULL); 1881 lwkt_gettoken(&pf_token); 1882 } 1883 break; 1884 #endif /* INET */ 1885 #ifdef INET6 1886 case AF_INET6: 1887 /* TCP checksum */ 1888 th->th_sum = in6_cksum(m, IPPROTO_TCP, 1889 sizeof(struct ip6_hdr), tlen); 1890 1891 h6->ip6_vfc |= IPV6_VERSION; 1892 h6->ip6_hlim = IPV6_DEFHLIM; 1893 1894 lwkt_reltoken(&pf_token); 1895 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL); 1896 lwkt_gettoken(&pf_token); 1897 break; 1898 #endif /* INET6 */ 1899 } 1900 } 1901 1902 void 1903 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af, 1904 struct pf_rule *r) 1905 { 1906 struct mbuf *m0; 1907 1908 /* 1909 * DragonFly doesn't zero the auxillary pkghdr fields, only fw_flags, 1910 * so make sure pf.flags is clear. 1911 */ 1912 if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL) 1913 return; 1914 1915 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 1916 m0->m_pkthdr.pf.flags = 0; 1917 /* XXX Re-Check when Upgrading to > 4.4 */ 1918 m0->m_pkthdr.pf.statekey = NULL; 1919 1920 if (r->rtableid >= 0) 1921 m0->m_pkthdr.pf.rtableid = r->rtableid; 1922 1923 #ifdef ALTQ 1924 if (r->qid) { 1925 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 1926 m0->m_pkthdr.pf.qid = r->qid; 1927 m0->m_pkthdr.pf.ecn_af = af; 1928 m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *); 1929 } 1930 #endif /* ALTQ */ 1931 1932 switch (af) { 1933 #ifdef INET 1934 case AF_INET: 1935 icmp_error(m0, type, code, 0, 0); 1936 break; 1937 #endif /* INET */ 1938 #ifdef INET6 1939 case AF_INET6: 1940 icmp6_error(m0, type, code, 0); 1941 break; 1942 #endif /* INET6 */ 1943 } 1944 } 1945 1946 /* 1947 * Return 1 if the addresses a and b match (with mask m), otherwise return 0. 1948 * If n is 0, they match if they are equal. If n is != 0, they match if they 1949 * are different. 1950 */ 1951 int 1952 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, 1953 struct pf_addr *b, sa_family_t af) 1954 { 1955 int match = 0; 1956 1957 switch (af) { 1958 #ifdef INET 1959 case AF_INET: 1960 if ((a->addr32[0] & m->addr32[0]) == 1961 (b->addr32[0] & m->addr32[0])) 1962 match++; 1963 break; 1964 #endif /* INET */ 1965 #ifdef INET6 1966 case AF_INET6: 1967 if (((a->addr32[0] & m->addr32[0]) == 1968 (b->addr32[0] & m->addr32[0])) && 1969 ((a->addr32[1] & m->addr32[1]) == 1970 (b->addr32[1] & m->addr32[1])) && 1971 ((a->addr32[2] & m->addr32[2]) == 1972 (b->addr32[2] & m->addr32[2])) && 1973 ((a->addr32[3] & m->addr32[3]) == 1974 (b->addr32[3] & m->addr32[3]))) 1975 match++; 1976 break; 1977 #endif /* INET6 */ 1978 } 1979 if (match) { 1980 if (n) 1981 return (0); 1982 else 1983 return (1); 1984 } else { 1985 if (n) 1986 return (1); 1987 else 1988 return (0); 1989 } 1990 } 1991 1992 /* 1993 * Return 1 if b <= a <= e, otherwise return 0. 1994 */ 1995 int 1996 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, 1997 struct pf_addr *a, sa_family_t af) 1998 { 1999 switch (af) { 2000 #ifdef INET 2001 case AF_INET: 2002 if ((a->addr32[0] < b->addr32[0]) || 2003 (a->addr32[0] > e->addr32[0])) 2004 return (0); 2005 break; 2006 #endif /* INET */ 2007 #ifdef INET6 2008 case AF_INET6: { 2009 int i; 2010 2011 /* check a >= b */ 2012 for (i = 0; i < 4; ++i) 2013 if (a->addr32[i] > b->addr32[i]) 2014 break; 2015 else if (a->addr32[i] < b->addr32[i]) 2016 return (0); 2017 /* check a <= e */ 2018 for (i = 0; i < 4; ++i) 2019 if (a->addr32[i] < e->addr32[i]) 2020 break; 2021 else if (a->addr32[i] > e->addr32[i]) 2022 return (0); 2023 break; 2024 } 2025 #endif /* INET6 */ 2026 } 2027 return (1); 2028 } 2029 2030 int 2031 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) 2032 { 2033 switch (op) { 2034 case PF_OP_IRG: 2035 return ((p > a1) && (p < a2)); 2036 case PF_OP_XRG: 2037 return ((p < a1) || (p > a2)); 2038 case PF_OP_RRG: 2039 return ((p >= a1) && (p <= a2)); 2040 case PF_OP_EQ: 2041 return (p == a1); 2042 case PF_OP_NE: 2043 return (p != a1); 2044 case PF_OP_LT: 2045 return (p < a1); 2046 case PF_OP_LE: 2047 return (p <= a1); 2048 case PF_OP_GT: 2049 return (p > a1); 2050 case PF_OP_GE: 2051 return (p >= a1); 2052 } 2053 return (0); /* never reached */ 2054 } 2055 2056 int 2057 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) 2058 { 2059 a1 = ntohs(a1); 2060 a2 = ntohs(a2); 2061 p = ntohs(p); 2062 return (pf_match(op, a1, a2, p)); 2063 } 2064 2065 int 2066 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) 2067 { 2068 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2069 return (0); 2070 return (pf_match(op, a1, a2, u)); 2071 } 2072 2073 int 2074 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) 2075 { 2076 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2077 return (0); 2078 return (pf_match(op, a1, a2, g)); 2079 } 2080 2081 int 2082 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag) 2083 { 2084 if (*tag == -1) 2085 *tag = m->m_pkthdr.pf.tag; 2086 2087 return ((!r->match_tag_not && r->match_tag == *tag) || 2088 (r->match_tag_not && r->match_tag != *tag)); 2089 } 2090 2091 int 2092 pf_tag_packet(struct mbuf *m, int tag, int rtableid) 2093 { 2094 if (tag <= 0 && rtableid < 0) 2095 return (0); 2096 2097 if (tag > 0) 2098 m->m_pkthdr.pf.tag = tag; 2099 if (rtableid >= 0) 2100 m->m_pkthdr.pf.rtableid = rtableid; 2101 2102 return (0); 2103 } 2104 2105 void 2106 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n, 2107 struct pf_rule **r, struct pf_rule **a, int *match) 2108 { 2109 struct pf_anchor_stackframe *f; 2110 2111 (*r)->anchor->match = 0; 2112 if (match) 2113 *match = 0; 2114 if (*depth >= sizeof(pf_anchor_stack) / 2115 sizeof(pf_anchor_stack[0])) { 2116 kprintf("pf_step_into_anchor: stack overflow\n"); 2117 *r = TAILQ_NEXT(*r, entries); 2118 return; 2119 } else if (*depth == 0 && a != NULL) 2120 *a = *r; 2121 f = pf_anchor_stack + (*depth)++; 2122 f->rs = *rs; 2123 f->r = *r; 2124 if ((*r)->anchor_wildcard) { 2125 f->parent = &(*r)->anchor->children; 2126 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) == 2127 NULL) { 2128 *r = NULL; 2129 return; 2130 } 2131 *rs = &f->child->ruleset; 2132 } else { 2133 f->parent = NULL; 2134 f->child = NULL; 2135 *rs = &(*r)->anchor->ruleset; 2136 } 2137 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2138 } 2139 2140 int 2141 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n, 2142 struct pf_rule **r, struct pf_rule **a, int *match) 2143 { 2144 struct pf_anchor_stackframe *f; 2145 int quick = 0; 2146 2147 do { 2148 if (*depth <= 0) 2149 break; 2150 f = pf_anchor_stack + *depth - 1; 2151 if (f->parent != NULL && f->child != NULL) { 2152 if (f->child->match || 2153 (match != NULL && *match)) { 2154 f->r->anchor->match = 1; 2155 *match = 0; 2156 } 2157 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child); 2158 if (f->child != NULL) { 2159 *rs = &f->child->ruleset; 2160 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2161 if (*r == NULL) 2162 continue; 2163 else 2164 break; 2165 } 2166 } 2167 (*depth)--; 2168 if (*depth == 0 && a != NULL) 2169 *a = NULL; 2170 *rs = f->rs; 2171 if (f->r->anchor->match || (match != NULL && *match)) 2172 quick = f->r->quick; 2173 *r = TAILQ_NEXT(f->r, entries); 2174 } while (*r == NULL); 2175 2176 return (quick); 2177 } 2178 2179 #ifdef INET6 2180 void 2181 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, 2182 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) 2183 { 2184 switch (af) { 2185 #ifdef INET 2186 case AF_INET: 2187 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2188 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2189 break; 2190 #endif /* INET */ 2191 case AF_INET6: 2192 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2193 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2194 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | 2195 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); 2196 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | 2197 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); 2198 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | 2199 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); 2200 break; 2201 } 2202 } 2203 2204 void 2205 pf_addr_inc(struct pf_addr *addr, sa_family_t af) 2206 { 2207 switch (af) { 2208 #ifdef INET 2209 case AF_INET: 2210 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); 2211 break; 2212 #endif /* INET */ 2213 case AF_INET6: 2214 if (addr->addr32[3] == 0xffffffff) { 2215 addr->addr32[3] = 0; 2216 if (addr->addr32[2] == 0xffffffff) { 2217 addr->addr32[2] = 0; 2218 if (addr->addr32[1] == 0xffffffff) { 2219 addr->addr32[1] = 0; 2220 addr->addr32[0] = 2221 htonl(ntohl(addr->addr32[0]) + 1); 2222 } else 2223 addr->addr32[1] = 2224 htonl(ntohl(addr->addr32[1]) + 1); 2225 } else 2226 addr->addr32[2] = 2227 htonl(ntohl(addr->addr32[2]) + 1); 2228 } else 2229 addr->addr32[3] = 2230 htonl(ntohl(addr->addr32[3]) + 1); 2231 break; 2232 } 2233 } 2234 #endif /* INET6 */ 2235 2236 #define mix(a,b,c) \ 2237 do { \ 2238 a -= b; a -= c; a ^= (c >> 13); \ 2239 b -= c; b -= a; b ^= (a << 8); \ 2240 c -= a; c -= b; c ^= (b >> 13); \ 2241 a -= b; a -= c; a ^= (c >> 12); \ 2242 b -= c; b -= a; b ^= (a << 16); \ 2243 c -= a; c -= b; c ^= (b >> 5); \ 2244 a -= b; a -= c; a ^= (c >> 3); \ 2245 b -= c; b -= a; b ^= (a << 10); \ 2246 c -= a; c -= b; c ^= (b >> 15); \ 2247 } while (0) 2248 2249 /* 2250 * hash function based on bridge_hash in if_bridge.c 2251 */ 2252 void 2253 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash, 2254 struct pf_poolhashkey *key, sa_family_t af) 2255 { 2256 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0]; 2257 2258 switch (af) { 2259 #ifdef INET 2260 case AF_INET: 2261 a += inaddr->addr32[0]; 2262 b += key->key32[1]; 2263 mix(a, b, c); 2264 hash->addr32[0] = c + key->key32[2]; 2265 break; 2266 #endif /* INET */ 2267 #ifdef INET6 2268 case AF_INET6: 2269 a += inaddr->addr32[0]; 2270 b += inaddr->addr32[2]; 2271 mix(a, b, c); 2272 hash->addr32[0] = c; 2273 a += inaddr->addr32[1]; 2274 b += inaddr->addr32[3]; 2275 c += key->key32[1]; 2276 mix(a, b, c); 2277 hash->addr32[1] = c; 2278 a += inaddr->addr32[2]; 2279 b += inaddr->addr32[1]; 2280 c += key->key32[2]; 2281 mix(a, b, c); 2282 hash->addr32[2] = c; 2283 a += inaddr->addr32[3]; 2284 b += inaddr->addr32[0]; 2285 c += key->key32[3]; 2286 mix(a, b, c); 2287 hash->addr32[3] = c; 2288 break; 2289 #endif /* INET6 */ 2290 } 2291 } 2292 2293 int 2294 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, 2295 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn) 2296 { 2297 unsigned char hash[16]; 2298 struct pf_pool *rpool = &r->rpool; 2299 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr; 2300 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask; 2301 struct pf_pooladdr *acur = rpool->cur; 2302 struct pf_src_node k; 2303 2304 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR && 2305 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2306 k.af = af; 2307 PF_ACPY(&k.addr, saddr, af); 2308 if (r->rule_flag & PFRULE_RULESRCTRACK || 2309 r->rpool.opts & PF_POOL_STICKYADDR) 2310 k.rule.ptr = r; 2311 else 2312 k.rule.ptr = NULL; 2313 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; 2314 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k); 2315 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) { 2316 PF_ACPY(naddr, &(*sn)->raddr, af); 2317 if (pf_status.debug >= PF_DEBUG_MISC) { 2318 kprintf("pf_map_addr: src tracking maps "); 2319 pf_print_host(&k.addr, 0, af); 2320 kprintf(" to "); 2321 pf_print_host(naddr, 0, af); 2322 kprintf("\n"); 2323 } 2324 return (0); 2325 } 2326 } 2327 2328 if (rpool->cur->addr.type == PF_ADDR_NOROUTE) 2329 return (1); 2330 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2331 switch (af) { 2332 #ifdef INET 2333 case AF_INET: 2334 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 && 2335 (rpool->opts & PF_POOL_TYPEMASK) != 2336 PF_POOL_ROUNDROBIN) 2337 return (1); 2338 raddr = &rpool->cur->addr.p.dyn->pfid_addr4; 2339 rmask = &rpool->cur->addr.p.dyn->pfid_mask4; 2340 break; 2341 #endif /* INET */ 2342 #ifdef INET6 2343 case AF_INET6: 2344 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 && 2345 (rpool->opts & PF_POOL_TYPEMASK) != 2346 PF_POOL_ROUNDROBIN) 2347 return (1); 2348 raddr = &rpool->cur->addr.p.dyn->pfid_addr6; 2349 rmask = &rpool->cur->addr.p.dyn->pfid_mask6; 2350 break; 2351 #endif /* INET6 */ 2352 } 2353 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2354 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) 2355 return (1); /* unsupported */ 2356 } else { 2357 raddr = &rpool->cur->addr.v.a.addr; 2358 rmask = &rpool->cur->addr.v.a.mask; 2359 } 2360 2361 switch (rpool->opts & PF_POOL_TYPEMASK) { 2362 case PF_POOL_NONE: 2363 PF_ACPY(naddr, raddr, af); 2364 break; 2365 case PF_POOL_BITMASK: 2366 PF_POOLMASK(naddr, raddr, rmask, saddr, af); 2367 break; 2368 case PF_POOL_RANDOM: 2369 if (init_addr != NULL && PF_AZERO(init_addr, af)) { 2370 switch (af) { 2371 #ifdef INET 2372 case AF_INET: 2373 rpool->counter.addr32[0] = htonl(karc4random()); 2374 break; 2375 #endif /* INET */ 2376 #ifdef INET6 2377 case AF_INET6: 2378 if (rmask->addr32[3] != 0xffffffff) 2379 rpool->counter.addr32[3] = 2380 htonl(karc4random()); 2381 else 2382 break; 2383 if (rmask->addr32[2] != 0xffffffff) 2384 rpool->counter.addr32[2] = 2385 htonl(karc4random()); 2386 else 2387 break; 2388 if (rmask->addr32[1] != 0xffffffff) 2389 rpool->counter.addr32[1] = 2390 htonl(karc4random()); 2391 else 2392 break; 2393 if (rmask->addr32[0] != 0xffffffff) 2394 rpool->counter.addr32[0] = 2395 htonl(karc4random()); 2396 break; 2397 #endif /* INET6 */ 2398 } 2399 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af); 2400 PF_ACPY(init_addr, naddr, af); 2401 2402 } else { 2403 PF_AINC(&rpool->counter, af); 2404 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af); 2405 } 2406 break; 2407 case PF_POOL_SRCHASH: 2408 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af); 2409 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af); 2410 break; 2411 case PF_POOL_ROUNDROBIN: 2412 if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2413 if (!pfr_pool_get(rpool->cur->addr.p.tbl, 2414 &rpool->tblidx, &rpool->counter, 2415 &raddr, &rmask, af)) 2416 goto get_addr; 2417 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2418 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, 2419 &rpool->tblidx, &rpool->counter, 2420 &raddr, &rmask, af)) 2421 goto get_addr; 2422 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af)) 2423 goto get_addr; 2424 2425 try_next: 2426 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL) 2427 rpool->cur = TAILQ_FIRST(&rpool->list); 2428 if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2429 rpool->tblidx = -1; 2430 if (pfr_pool_get(rpool->cur->addr.p.tbl, 2431 &rpool->tblidx, &rpool->counter, 2432 &raddr, &rmask, af)) { 2433 /* table contains no address of type 'af' */ 2434 if (rpool->cur != acur) 2435 goto try_next; 2436 return (1); 2437 } 2438 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2439 rpool->tblidx = -1; 2440 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, 2441 &rpool->tblidx, &rpool->counter, 2442 &raddr, &rmask, af)) { 2443 /* table contains no address of type 'af' */ 2444 if (rpool->cur != acur) 2445 goto try_next; 2446 return (1); 2447 } 2448 } else { 2449 raddr = &rpool->cur->addr.v.a.addr; 2450 rmask = &rpool->cur->addr.v.a.mask; 2451 PF_ACPY(&rpool->counter, raddr, af); 2452 } 2453 2454 get_addr: 2455 PF_ACPY(naddr, &rpool->counter, af); 2456 if (init_addr != NULL && PF_AZERO(init_addr, af)) 2457 PF_ACPY(init_addr, naddr, af); 2458 PF_AINC(&rpool->counter, af); 2459 break; 2460 } 2461 if (*sn != NULL) 2462 PF_ACPY(&(*sn)->raddr, naddr, af); 2463 2464 if (pf_status.debug >= PF_DEBUG_MISC && 2465 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2466 kprintf("pf_map_addr: selected address "); 2467 pf_print_host(naddr, 0, af); 2468 kprintf("\n"); 2469 } 2470 2471 return (0); 2472 } 2473 2474 int 2475 pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r, 2476 struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport, 2477 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high, 2478 struct pf_src_node **sn) 2479 { 2480 struct pf_state_key_cmp key; 2481 struct pf_addr init_addr; 2482 u_int16_t cut; 2483 2484 bzero(&init_addr, sizeof(init_addr)); 2485 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2486 return (1); 2487 2488 if (proto == IPPROTO_ICMP) { 2489 low = 1; 2490 high = 65535; 2491 } 2492 2493 do { 2494 key.af = af; 2495 key.proto = proto; 2496 PF_ACPY(&key.addr[1], daddr, key.af); 2497 PF_ACPY(&key.addr[0], naddr, key.af); 2498 key.port[1] = dport; 2499 2500 /* 2501 * port search; start random, step; 2502 * similar 2 portloop in in_pcbbind 2503 */ 2504 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP || 2505 proto == IPPROTO_ICMP)) { 2506 key.port[0] = dport; 2507 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) 2508 return (0); 2509 } else if (low == 0 && high == 0) { 2510 key.port[0] = *nport; 2511 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) 2512 return (0); 2513 } else if (low == high) { 2514 key.port[0] = htons(low); 2515 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2516 *nport = htons(low); 2517 return (0); 2518 } 2519 } else { 2520 u_int16_t tmp; 2521 2522 if (low > high) { 2523 tmp = low; 2524 low = high; 2525 high = tmp; 2526 } 2527 /* low < high */ 2528 cut = htonl(karc4random()) % (1 + high - low) + low; 2529 /* low <= cut <= high */ 2530 for (tmp = cut; tmp <= high; ++(tmp)) { 2531 key.port[0] = htons(tmp); 2532 if (pf_find_state_all(&key, PF_IN, NULL) == 2533 NULL && !in_baddynamic(tmp, proto)) { 2534 *nport = htons(tmp); 2535 return (0); 2536 } 2537 } 2538 for (tmp = cut - 1; tmp >= low; --(tmp)) { 2539 key.port[0] = htons(tmp); 2540 if (pf_find_state_all(&key, PF_IN, NULL) == 2541 NULL && !in_baddynamic(tmp, proto)) { 2542 *nport = htons(tmp); 2543 return (0); 2544 } 2545 } 2546 } 2547 2548 switch (r->rpool.opts & PF_POOL_TYPEMASK) { 2549 case PF_POOL_RANDOM: 2550 case PF_POOL_ROUNDROBIN: 2551 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2552 return (1); 2553 break; 2554 case PF_POOL_NONE: 2555 case PF_POOL_SRCHASH: 2556 case PF_POOL_BITMASK: 2557 default: 2558 return (1); 2559 } 2560 } while (! PF_AEQ(&init_addr, naddr, af) ); 2561 return (1); /* none available */ 2562 } 2563 2564 struct pf_rule * 2565 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off, 2566 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport, 2567 struct pf_addr *daddr, u_int16_t dport, int rs_num) 2568 { 2569 struct pf_rule *r, *rm = NULL; 2570 struct pf_ruleset *ruleset = NULL; 2571 int tag = -1; 2572 int rtableid = -1; 2573 int asd = 0; 2574 2575 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr); 2576 while (r && rm == NULL) { 2577 struct pf_rule_addr *src = NULL, *dst = NULL; 2578 struct pf_addr_wrap *xdst = NULL; 2579 2580 if (r->action == PF_BINAT && direction == PF_IN) { 2581 src = &r->dst; 2582 if (r->rpool.cur != NULL) 2583 xdst = &r->rpool.cur->addr; 2584 } else { 2585 src = &r->src; 2586 dst = &r->dst; 2587 } 2588 2589 r->evaluations++; 2590 if (pfi_kif_match(r->kif, kif) == r->ifnot) 2591 r = r->skip[PF_SKIP_IFP].ptr; 2592 else if (r->direction && r->direction != direction) 2593 r = r->skip[PF_SKIP_DIR].ptr; 2594 else if (r->af && r->af != pd->af) 2595 r = r->skip[PF_SKIP_AF].ptr; 2596 else if (r->proto && r->proto != pd->proto) 2597 r = r->skip[PF_SKIP_PROTO].ptr; 2598 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af, 2599 src->neg, kif)) 2600 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR : 2601 PF_SKIP_DST_ADDR].ptr; 2602 else if (src->port_op && !pf_match_port(src->port_op, 2603 src->port[0], src->port[1], sport)) 2604 r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT : 2605 PF_SKIP_DST_PORT].ptr; 2606 else if (dst != NULL && 2607 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL)) 2608 r = r->skip[PF_SKIP_DST_ADDR].ptr; 2609 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af, 2610 0, NULL)) 2611 r = TAILQ_NEXT(r, entries); 2612 else if (dst != NULL && dst->port_op && 2613 !pf_match_port(dst->port_op, dst->port[0], 2614 dst->port[1], dport)) 2615 r = r->skip[PF_SKIP_DST_PORT].ptr; 2616 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 2617 r = TAILQ_NEXT(r, entries); 2618 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto != 2619 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m, 2620 off, pd->hdr.tcp), r->os_fingerprint))) 2621 r = TAILQ_NEXT(r, entries); 2622 else { 2623 if (r->tag) 2624 tag = r->tag; 2625 if (r->rtableid >= 0) 2626 rtableid = r->rtableid; 2627 if (r->anchor == NULL) { 2628 rm = r; 2629 } else 2630 pf_step_into_anchor(&asd, &ruleset, rs_num, 2631 &r, NULL, NULL); 2632 } 2633 if (r == NULL) 2634 pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r, 2635 NULL, NULL); 2636 } 2637 if (pf_tag_packet(m, tag, rtableid)) 2638 return (NULL); 2639 if (rm != NULL && (rm->action == PF_NONAT || 2640 rm->action == PF_NORDR || rm->action == PF_NOBINAT)) 2641 return (NULL); 2642 return (rm); 2643 } 2644 2645 struct pf_rule * 2646 pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction, 2647 struct pfi_kif *kif, struct pf_src_node **sn, 2648 struct pf_state_key **skw, struct pf_state_key **sks, 2649 struct pf_state_key **skp, struct pf_state_key **nkp, 2650 struct pf_addr *saddr, struct pf_addr *daddr, 2651 u_int16_t sport, u_int16_t dport) 2652 { 2653 struct pf_rule *r = NULL; 2654 2655 2656 if (direction == PF_OUT) { 2657 r = pf_match_translation(pd, m, off, direction, kif, saddr, 2658 sport, daddr, dport, PF_RULESET_BINAT); 2659 if (r == NULL) 2660 r = pf_match_translation(pd, m, off, direction, kif, 2661 saddr, sport, daddr, dport, PF_RULESET_NAT); 2662 } else { 2663 r = pf_match_translation(pd, m, off, direction, kif, saddr, 2664 sport, daddr, dport, PF_RULESET_RDR); 2665 if (r == NULL) 2666 r = pf_match_translation(pd, m, off, direction, kif, 2667 saddr, sport, daddr, dport, PF_RULESET_BINAT); 2668 } 2669 2670 if (r != NULL) { 2671 struct pf_addr *naddr; 2672 u_int16_t *nport; 2673 2674 if (pf_state_key_setup(pd, r, skw, sks, skp, nkp, 2675 saddr, daddr, sport, dport)) 2676 return r; 2677 2678 /* XXX We only modify one side for now. */ 2679 naddr = &(*nkp)->addr[1]; 2680 nport = &(*nkp)->port[1]; 2681 2682 /* 2683 * NOTE: Currently all translations will clear 2684 * BRIDGE_MBUF_TAGGED, telling the bridge to 2685 * ignore the original input encapsulation. 2686 */ 2687 switch (r->action) { 2688 case PF_NONAT: 2689 case PF_NOBINAT: 2690 case PF_NORDR: 2691 return (NULL); 2692 case PF_NAT: 2693 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2694 if (pf_get_sport(pd->af, pd->proto, r, saddr, 2695 daddr, dport, naddr, nport, r->rpool.proxy_port[0], 2696 r->rpool.proxy_port[1], sn)) { 2697 DPFPRINTF(PF_DEBUG_MISC, 2698 ("pf: NAT proxy port allocation " 2699 "(%u-%u) failed\n", 2700 r->rpool.proxy_port[0], 2701 r->rpool.proxy_port[1])); 2702 return (NULL); 2703 } 2704 break; 2705 case PF_BINAT: 2706 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2707 switch (direction) { 2708 case PF_OUT: 2709 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){ 2710 switch (pd->af) { 2711 #ifdef INET 2712 case AF_INET: 2713 if (r->rpool.cur->addr.p.dyn-> 2714 pfid_acnt4 < 1) 2715 return (NULL); 2716 PF_POOLMASK(naddr, 2717 &r->rpool.cur->addr.p.dyn-> 2718 pfid_addr4, 2719 &r->rpool.cur->addr.p.dyn-> 2720 pfid_mask4, 2721 saddr, AF_INET); 2722 break; 2723 #endif /* INET */ 2724 #ifdef INET6 2725 case AF_INET6: 2726 if (r->rpool.cur->addr.p.dyn-> 2727 pfid_acnt6 < 1) 2728 return (NULL); 2729 PF_POOLMASK(naddr, 2730 &r->rpool.cur->addr.p.dyn-> 2731 pfid_addr6, 2732 &r->rpool.cur->addr.p.dyn-> 2733 pfid_mask6, 2734 saddr, AF_INET6); 2735 break; 2736 #endif /* INET6 */ 2737 } 2738 } else 2739 PF_POOLMASK(naddr, 2740 &r->rpool.cur->addr.v.a.addr, 2741 &r->rpool.cur->addr.v.a.mask, 2742 saddr, pd->af); 2743 break; 2744 case PF_IN: 2745 if (r->src.addr.type == PF_ADDR_DYNIFTL) { 2746 switch (pd->af) { 2747 #ifdef INET 2748 case AF_INET: 2749 if (r->src.addr.p.dyn-> 2750 pfid_acnt4 < 1) 2751 return (NULL); 2752 PF_POOLMASK(naddr, 2753 &r->src.addr.p.dyn-> 2754 pfid_addr4, 2755 &r->src.addr.p.dyn-> 2756 pfid_mask4, 2757 daddr, AF_INET); 2758 break; 2759 #endif /* INET */ 2760 #ifdef INET6 2761 case AF_INET6: 2762 if (r->src.addr.p.dyn-> 2763 pfid_acnt6 < 1) 2764 return (NULL); 2765 PF_POOLMASK(naddr, 2766 &r->src.addr.p.dyn-> 2767 pfid_addr6, 2768 &r->src.addr.p.dyn-> 2769 pfid_mask6, 2770 daddr, AF_INET6); 2771 break; 2772 #endif /* INET6 */ 2773 } 2774 } else 2775 PF_POOLMASK(naddr, 2776 &r->src.addr.v.a.addr, 2777 &r->src.addr.v.a.mask, daddr, 2778 pd->af); 2779 break; 2780 } 2781 break; 2782 case PF_RDR: { 2783 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2784 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn)) 2785 return (NULL); 2786 if ((r->rpool.opts & PF_POOL_TYPEMASK) == 2787 PF_POOL_BITMASK) 2788 PF_POOLMASK(naddr, naddr, 2789 &r->rpool.cur->addr.v.a.mask, daddr, 2790 pd->af); 2791 2792 if (r->rpool.proxy_port[1]) { 2793 u_int32_t tmp_nport; 2794 2795 tmp_nport = ((ntohs(dport) - 2796 ntohs(r->dst.port[0])) % 2797 (r->rpool.proxy_port[1] - 2798 r->rpool.proxy_port[0] + 1)) + 2799 r->rpool.proxy_port[0]; 2800 2801 /* wrap around if necessary */ 2802 if (tmp_nport > 65535) 2803 tmp_nport -= 65535; 2804 *nport = htons((u_int16_t)tmp_nport); 2805 } else if (r->rpool.proxy_port[0]) 2806 *nport = htons(r->rpool.proxy_port[0]); 2807 break; 2808 } 2809 default: 2810 return (NULL); 2811 } 2812 } 2813 2814 return (r); 2815 } 2816 2817 #ifdef SMP 2818 struct netmsg_hashlookup { 2819 struct netmsg_base base; 2820 struct inpcb **nm_pinp; 2821 struct inpcbinfo *nm_pcbinfo; 2822 struct pf_addr *nm_saddr; 2823 struct pf_addr *nm_daddr; 2824 uint16_t nm_sport; 2825 uint16_t nm_dport; 2826 sa_family_t nm_af; 2827 }; 2828 2829 #ifdef PF_SOCKET_LOOKUP_DOMSG 2830 static void 2831 in_pcblookup_hash_handler(netmsg_t msg) 2832 { 2833 struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup *)msg; 2834 2835 if (rmsg->nm_af == AF_INET) 2836 *rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo, 2837 rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4, 2838 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 2839 #ifdef INET6 2840 else 2841 *rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo, 2842 &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6, 2843 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 2844 #endif /* INET6 */ 2845 lwkt_replymsg(&rmsg->base.lmsg, 0); 2846 } 2847 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 2848 2849 #endif /* SMP */ 2850 2851 int 2852 pf_socket_lookup(int direction, struct pf_pdesc *pd) 2853 { 2854 struct pf_addr *saddr, *daddr; 2855 u_int16_t sport, dport; 2856 struct inpcbinfo *pi; 2857 struct inpcb *inp; 2858 #ifdef SMP 2859 struct netmsg_hashlookup *msg = NULL; 2860 #ifdef PF_SOCKET_LOOKUP_DOMSG 2861 struct netmsg_hashlookup msg0; 2862 #endif 2863 #endif 2864 int pi_cpu = 0; 2865 2866 if (pd == NULL) 2867 return (-1); 2868 pd->lookup.uid = UID_MAX; 2869 pd->lookup.gid = GID_MAX; 2870 pd->lookup.pid = NO_PID; 2871 if (direction == PF_IN) { 2872 saddr = pd->src; 2873 daddr = pd->dst; 2874 } else { 2875 saddr = pd->dst; 2876 daddr = pd->src; 2877 } 2878 switch (pd->proto) { 2879 case IPPROTO_TCP: 2880 if (pd->hdr.tcp == NULL) 2881 return (-1); 2882 sport = pd->hdr.tcp->th_sport; 2883 dport = pd->hdr.tcp->th_dport; 2884 2885 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport); 2886 pi = &tcbinfo[pi_cpu]; 2887 #ifdef SMP 2888 /* 2889 * Our netstack runs lockless on MP systems 2890 * (only for TCP connections at the moment). 2891 * 2892 * As we are not allowed to read another CPU's tcbinfo, 2893 * we have to ask that CPU via remote call to search the 2894 * table for us. 2895 * 2896 * Prepare a msg iff data belongs to another CPU. 2897 */ 2898 if (pi_cpu != mycpu->gd_cpuid) { 2899 #ifdef PF_SOCKET_LOOKUP_DOMSG 2900 /* 2901 * NOTE: 2902 * 2903 * Following lwkt_domsg() is dangerous and could 2904 * lockup the network system, e.g. 2905 * 2906 * On 2 CPU system: 2907 * netisr0 domsg to netisr1 (due to lookup) 2908 * netisr1 domsg to netisr0 (due to lookup) 2909 * 2910 * We simply return -1 here, since we are probably 2911 * called before NAT, so the TCP packet should 2912 * already be on the correct CPU. 2913 */ 2914 msg = &msg0; 2915 netmsg_init(&msg->base, NULL, &curthread->td_msgport, 2916 0, in_pcblookup_hash_handler); 2917 msg->nm_pinp = &inp; 2918 msg->nm_pcbinfo = pi; 2919 msg->nm_saddr = saddr; 2920 msg->nm_sport = sport; 2921 msg->nm_daddr = daddr; 2922 msg->nm_dport = dport; 2923 msg->nm_af = pd->af; 2924 #else /* !PF_SOCKET_LOOKUP_DOMSG */ 2925 kprintf("pf_socket_lookup: tcp packet not on the " 2926 "correct cpu %d, cur cpu %d\n", 2927 pi_cpu, mycpuid); 2928 print_backtrace(-1); 2929 return -1; 2930 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 2931 } 2932 #endif /* SMP */ 2933 break; 2934 case IPPROTO_UDP: 2935 if (pd->hdr.udp == NULL) 2936 return (-1); 2937 sport = pd->hdr.udp->uh_sport; 2938 dport = pd->hdr.udp->uh_dport; 2939 pi = &udbinfo; 2940 break; 2941 default: 2942 return (-1); 2943 } 2944 if (direction != PF_IN) { 2945 u_int16_t p; 2946 2947 p = sport; 2948 sport = dport; 2949 dport = p; 2950 } 2951 switch (pd->af) { 2952 #ifdef INET6 2953 case AF_INET6: 2954 #ifdef SMP 2955 /* 2956 * Query other CPU, second part 2957 * 2958 * msg only gets initialized when: 2959 * 1) packet is TCP 2960 * 2) the info belongs to another CPU 2961 * 2962 * Use some switch/case magic to avoid code duplication. 2963 */ 2964 if (msg == NULL) 2965 #endif /* SMP */ 2966 { 2967 inp = in6_pcblookup_hash(pi, &saddr->v6, sport, 2968 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL); 2969 2970 if (inp == NULL) 2971 return (-1); 2972 break; 2973 } 2974 /* FALLTHROUGH if SMP and on other CPU */ 2975 #endif /* INET6 */ 2976 case AF_INET: 2977 #ifdef SMP 2978 if (msg != NULL) { 2979 lwkt_domsg(cpu_portfn(pi_cpu), 2980 &msg->base.lmsg, 0); 2981 } else 2982 #endif /* SMP */ 2983 { 2984 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4, 2985 dport, INPLOOKUP_WILDCARD, NULL); 2986 } 2987 if (inp == NULL) 2988 return (-1); 2989 break; 2990 2991 default: 2992 return (-1); 2993 } 2994 pd->lookup.uid = inp->inp_socket->so_cred->cr_uid; 2995 pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0]; 2996 return (1); 2997 } 2998 2999 u_int8_t 3000 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3001 { 3002 int hlen; 3003 u_int8_t hdr[60]; 3004 u_int8_t *opt, optlen; 3005 u_int8_t wscale = 0; 3006 3007 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3008 if (hlen <= sizeof(struct tcphdr)) 3009 return (0); 3010 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3011 return (0); 3012 opt = hdr + sizeof(struct tcphdr); 3013 hlen -= sizeof(struct tcphdr); 3014 while (hlen >= 3) { 3015 switch (*opt) { 3016 case TCPOPT_EOL: 3017 case TCPOPT_NOP: 3018 ++opt; 3019 --hlen; 3020 break; 3021 case TCPOPT_WINDOW: 3022 wscale = opt[2]; 3023 if (wscale > TCP_MAX_WINSHIFT) 3024 wscale = TCP_MAX_WINSHIFT; 3025 wscale |= PF_WSCALE_FLAG; 3026 /* FALLTHROUGH */ 3027 default: 3028 optlen = opt[1]; 3029 if (optlen < 2) 3030 optlen = 2; 3031 hlen -= optlen; 3032 opt += optlen; 3033 break; 3034 } 3035 } 3036 return (wscale); 3037 } 3038 3039 u_int16_t 3040 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3041 { 3042 int hlen; 3043 u_int8_t hdr[60]; 3044 u_int8_t *opt, optlen; 3045 u_int16_t mss = tcp_mssdflt; 3046 3047 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3048 if (hlen <= sizeof(struct tcphdr)) 3049 return (0); 3050 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3051 return (0); 3052 opt = hdr + sizeof(struct tcphdr); 3053 hlen -= sizeof(struct tcphdr); 3054 while (hlen >= TCPOLEN_MAXSEG) { 3055 switch (*opt) { 3056 case TCPOPT_EOL: 3057 case TCPOPT_NOP: 3058 ++opt; 3059 --hlen; 3060 break; 3061 case TCPOPT_MAXSEG: 3062 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); 3063 /* FALLTHROUGH */ 3064 default: 3065 optlen = opt[1]; 3066 if (optlen < 2) 3067 optlen = 2; 3068 hlen -= optlen; 3069 opt += optlen; 3070 break; 3071 } 3072 } 3073 return (mss); 3074 } 3075 3076 u_int16_t 3077 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) 3078 { 3079 #ifdef INET 3080 struct sockaddr_in *dst; 3081 struct route ro; 3082 #endif /* INET */ 3083 #ifdef INET6 3084 struct sockaddr_in6 *dst6; 3085 struct route_in6 ro6; 3086 #endif /* INET6 */ 3087 struct rtentry *rt = NULL; 3088 int hlen = 0; 3089 u_int16_t mss = tcp_mssdflt; 3090 3091 switch (af) { 3092 #ifdef INET 3093 case AF_INET: 3094 hlen = sizeof(struct ip); 3095 bzero(&ro, sizeof(ro)); 3096 dst = (struct sockaddr_in *)&ro.ro_dst; 3097 dst->sin_family = AF_INET; 3098 dst->sin_len = sizeof(*dst); 3099 dst->sin_addr = addr->v4; 3100 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING)); 3101 rt = ro.ro_rt; 3102 break; 3103 #endif /* INET */ 3104 #ifdef INET6 3105 case AF_INET6: 3106 hlen = sizeof(struct ip6_hdr); 3107 bzero(&ro6, sizeof(ro6)); 3108 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst; 3109 dst6->sin6_family = AF_INET6; 3110 dst6->sin6_len = sizeof(*dst6); 3111 dst6->sin6_addr = addr->v6; 3112 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING)); 3113 rt = ro6.ro_rt; 3114 break; 3115 #endif /* INET6 */ 3116 } 3117 3118 if (rt && rt->rt_ifp) { 3119 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr); 3120 mss = max(tcp_mssdflt, mss); 3121 RTFREE(rt); 3122 } 3123 mss = min(mss, offer); 3124 mss = max(mss, 64); /* sanity - at least max opt space */ 3125 return (mss); 3126 } 3127 3128 void 3129 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr) 3130 { 3131 struct pf_rule *r = s->rule.ptr; 3132 3133 s->rt_kif = NULL; 3134 if (!r->rt || r->rt == PF_FASTROUTE) 3135 return; 3136 switch (s->key[PF_SK_WIRE]->af) { 3137 #ifdef INET 3138 case AF_INET: 3139 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, 3140 &s->nat_src_node); 3141 s->rt_kif = r->rpool.cur->kif; 3142 break; 3143 #endif /* INET */ 3144 #ifdef INET6 3145 case AF_INET6: 3146 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, 3147 &s->nat_src_node); 3148 s->rt_kif = r->rpool.cur->kif; 3149 break; 3150 #endif /* INET6 */ 3151 } 3152 } 3153 3154 u_int32_t 3155 pf_tcp_iss(struct pf_pdesc *pd) 3156 { 3157 MD5_CTX ctx; 3158 u_int32_t digest[4]; 3159 3160 if (pf_tcp_secret_init == 0) { 3161 karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret)); 3162 MD5Init(&pf_tcp_secret_ctx); 3163 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret, 3164 sizeof(pf_tcp_secret)); 3165 pf_tcp_secret_init = 1; 3166 } 3167 ctx = pf_tcp_secret_ctx; 3168 3169 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short)); 3170 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short)); 3171 if (pd->af == AF_INET6) { 3172 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr)); 3173 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr)); 3174 } else { 3175 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr)); 3176 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr)); 3177 } 3178 MD5Final((u_char *)digest, &ctx); 3179 pf_tcp_iss_off += 4096; 3180 return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off); 3181 } 3182 3183 int 3184 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, 3185 struct pfi_kif *kif, struct mbuf *m, int off, void *h, 3186 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm, 3187 struct ifqueue *ifq, struct inpcb *inp) 3188 { 3189 struct pf_rule *nr = NULL; 3190 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 3191 sa_family_t af = pd->af; 3192 struct pf_rule *r, *a = NULL; 3193 struct pf_ruleset *ruleset = NULL; 3194 struct pf_src_node *nsn = NULL; 3195 struct tcphdr *th = pd->hdr.tcp; 3196 struct pf_state_key *skw = NULL, *sks = NULL; 3197 struct pf_state_key *sk = NULL, *nk = NULL; 3198 u_short reason; 3199 int rewrite = 0, hdrlen = 0; 3200 int tag = -1, rtableid = -1; 3201 int asd = 0; 3202 int match = 0; 3203 int state_icmp = 0; 3204 u_int16_t sport = 0, dport = 0; 3205 u_int16_t nport = 0, bport = 0; 3206 u_int16_t bproto_sum = 0, bip_sum = 0; 3207 u_int8_t icmptype = 0, icmpcode = 0; 3208 3209 3210 if (direction == PF_IN && pf_check_congestion(ifq)) { 3211 REASON_SET(&reason, PFRES_CONGEST); 3212 return (PF_DROP); 3213 } 3214 3215 if (inp != NULL) 3216 pd->lookup.done = pf_socket_lookup(direction, pd); 3217 else if (debug_pfugidhack) { 3218 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n")); 3219 pd->lookup.done = pf_socket_lookup(direction, pd); 3220 } 3221 3222 switch (pd->proto) { 3223 case IPPROTO_TCP: 3224 sport = th->th_sport; 3225 dport = th->th_dport; 3226 hdrlen = sizeof(*th); 3227 break; 3228 case IPPROTO_UDP: 3229 sport = pd->hdr.udp->uh_sport; 3230 dport = pd->hdr.udp->uh_dport; 3231 hdrlen = sizeof(*pd->hdr.udp); 3232 break; 3233 #ifdef INET 3234 case IPPROTO_ICMP: 3235 if (pd->af != AF_INET) 3236 break; 3237 sport = dport = pd->hdr.icmp->icmp_id; 3238 hdrlen = sizeof(*pd->hdr.icmp); 3239 icmptype = pd->hdr.icmp->icmp_type; 3240 icmpcode = pd->hdr.icmp->icmp_code; 3241 3242 if (icmptype == ICMP_UNREACH || 3243 icmptype == ICMP_SOURCEQUENCH || 3244 icmptype == ICMP_REDIRECT || 3245 icmptype == ICMP_TIMXCEED || 3246 icmptype == ICMP_PARAMPROB) 3247 state_icmp++; 3248 break; 3249 #endif /* INET */ 3250 #ifdef INET6 3251 case IPPROTO_ICMPV6: 3252 if (af != AF_INET6) 3253 break; 3254 sport = dport = pd->hdr.icmp6->icmp6_id; 3255 hdrlen = sizeof(*pd->hdr.icmp6); 3256 icmptype = pd->hdr.icmp6->icmp6_type; 3257 icmpcode = pd->hdr.icmp6->icmp6_code; 3258 3259 if (icmptype == ICMP6_DST_UNREACH || 3260 icmptype == ICMP6_PACKET_TOO_BIG || 3261 icmptype == ICMP6_TIME_EXCEEDED || 3262 icmptype == ICMP6_PARAM_PROB) 3263 state_icmp++; 3264 break; 3265 #endif /* INET6 */ 3266 default: 3267 sport = dport = hdrlen = 0; 3268 break; 3269 } 3270 3271 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3272 3273 bport = nport = sport; 3274 /* check packet for BINAT/NAT/RDR */ 3275 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, 3276 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) { 3277 if (nk == NULL || sk == NULL) { 3278 REASON_SET(&reason, PFRES_MEMORY); 3279 goto cleanup; 3280 } 3281 3282 if (pd->ip_sum) 3283 bip_sum = *pd->ip_sum; 3284 3285 switch (pd->proto) { 3286 case IPPROTO_TCP: 3287 bproto_sum = th->th_sum; 3288 pd->proto_sum = &th->th_sum; 3289 3290 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3291 nk->port[pd->sidx] != sport) { 3292 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 3293 &th->th_sum, &nk->addr[pd->sidx], 3294 nk->port[pd->sidx], 0, af); 3295 pd->sport = &th->th_sport; 3296 sport = th->th_sport; 3297 } 3298 3299 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3300 nk->port[pd->didx] != dport) { 3301 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 3302 &th->th_sum, &nk->addr[pd->didx], 3303 nk->port[pd->didx], 0, af); 3304 dport = th->th_dport; 3305 pd->dport = &th->th_dport; 3306 } 3307 rewrite++; 3308 break; 3309 case IPPROTO_UDP: 3310 bproto_sum = pd->hdr.udp->uh_sum; 3311 pd->proto_sum = &pd->hdr.udp->uh_sum; 3312 3313 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3314 nk->port[pd->sidx] != sport) { 3315 pf_change_ap(saddr, &pd->hdr.udp->uh_sport, 3316 pd->ip_sum, &pd->hdr.udp->uh_sum, 3317 &nk->addr[pd->sidx], 3318 nk->port[pd->sidx], 1, af); 3319 sport = pd->hdr.udp->uh_sport; 3320 pd->sport = &pd->hdr.udp->uh_sport; 3321 } 3322 3323 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3324 nk->port[pd->didx] != dport) { 3325 pf_change_ap(daddr, &pd->hdr.udp->uh_dport, 3326 pd->ip_sum, &pd->hdr.udp->uh_sum, 3327 &nk->addr[pd->didx], 3328 nk->port[pd->didx], 1, af); 3329 dport = pd->hdr.udp->uh_dport; 3330 pd->dport = &pd->hdr.udp->uh_dport; 3331 } 3332 rewrite++; 3333 break; 3334 #ifdef INET 3335 case IPPROTO_ICMP: 3336 nk->port[0] = nk->port[1]; 3337 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET)) 3338 pf_change_a(&saddr->v4.s_addr, pd->ip_sum, 3339 nk->addr[pd->sidx].v4.s_addr, 0); 3340 3341 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET)) 3342 pf_change_a(&daddr->v4.s_addr, pd->ip_sum, 3343 nk->addr[pd->didx].v4.s_addr, 0); 3344 3345 if (nk->port[1] != pd->hdr.icmp->icmp_id) { 3346 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup( 3347 pd->hdr.icmp->icmp_cksum, sport, 3348 nk->port[1], 0); 3349 pd->hdr.icmp->icmp_id = nk->port[1]; 3350 pd->sport = &pd->hdr.icmp->icmp_id; 3351 } 3352 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 3353 break; 3354 #endif /* INET */ 3355 #ifdef INET6 3356 case IPPROTO_ICMPV6: 3357 nk->port[0] = nk->port[1]; 3358 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6)) 3359 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum, 3360 &nk->addr[pd->sidx], 0); 3361 3362 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6)) 3363 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum, 3364 &nk->addr[pd->didx], 0); 3365 rewrite++; 3366 break; 3367 #endif /* INET */ 3368 default: 3369 switch (af) { 3370 #ifdef INET 3371 case AF_INET: 3372 if (PF_ANEQ(saddr, 3373 &nk->addr[pd->sidx], AF_INET)) 3374 pf_change_a(&saddr->v4.s_addr, 3375 pd->ip_sum, 3376 nk->addr[pd->sidx].v4.s_addr, 0); 3377 3378 if (PF_ANEQ(daddr, 3379 &nk->addr[pd->didx], AF_INET)) 3380 pf_change_a(&daddr->v4.s_addr, 3381 pd->ip_sum, 3382 nk->addr[pd->didx].v4.s_addr, 0); 3383 break; 3384 #endif /* INET */ 3385 #ifdef INET6 3386 case AF_INET6: 3387 if (PF_ANEQ(saddr, 3388 &nk->addr[pd->sidx], AF_INET6)) 3389 PF_ACPY(saddr, &nk->addr[pd->sidx], af); 3390 3391 if (PF_ANEQ(daddr, 3392 &nk->addr[pd->didx], AF_INET6)) 3393 PF_ACPY(saddr, &nk->addr[pd->didx], af); 3394 break; 3395 #endif /* INET */ 3396 } 3397 break; 3398 } 3399 if (nr->natpass) 3400 r = NULL; 3401 pd->nat_rule = nr; 3402 } 3403 3404 while (r != NULL) { 3405 r->evaluations++; 3406 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3407 r = r->skip[PF_SKIP_IFP].ptr; 3408 else if (r->direction && r->direction != direction) 3409 r = r->skip[PF_SKIP_DIR].ptr; 3410 else if (r->af && r->af != af) 3411 r = r->skip[PF_SKIP_AF].ptr; 3412 else if (r->proto && r->proto != pd->proto) 3413 r = r->skip[PF_SKIP_PROTO].ptr; 3414 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, 3415 r->src.neg, kif)) 3416 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3417 /* tcp/udp only. port_op always 0 in other cases */ 3418 else if (r->src.port_op && !pf_match_port(r->src.port_op, 3419 r->src.port[0], r->src.port[1], sport)) 3420 r = r->skip[PF_SKIP_SRC_PORT].ptr; 3421 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, 3422 r->dst.neg, NULL)) 3423 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3424 /* tcp/udp only. port_op always 0 in other cases */ 3425 else if (r->dst.port_op && !pf_match_port(r->dst.port_op, 3426 r->dst.port[0], r->dst.port[1], dport)) 3427 r = r->skip[PF_SKIP_DST_PORT].ptr; 3428 /* icmp only. type always 0 in other cases */ 3429 else if (r->type && r->type != icmptype + 1) 3430 r = TAILQ_NEXT(r, entries); 3431 /* icmp only. type always 0 in other cases */ 3432 else if (r->code && r->code != icmpcode + 1) 3433 r = TAILQ_NEXT(r, entries); 3434 else if (r->tos && !(r->tos == pd->tos)) 3435 r = TAILQ_NEXT(r, entries); 3436 else if (r->rule_flag & PFRULE_FRAGMENT) 3437 r = TAILQ_NEXT(r, entries); 3438 else if (pd->proto == IPPROTO_TCP && 3439 (r->flagset & th->th_flags) != r->flags) 3440 r = TAILQ_NEXT(r, entries); 3441 /* tcp/udp only. uid.op always 0 in other cases */ 3442 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done = 3443 pf_socket_lookup(direction, pd), 1)) && 3444 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1], 3445 pd->lookup.uid)) 3446 r = TAILQ_NEXT(r, entries); 3447 /* tcp/udp only. gid.op always 0 in other cases */ 3448 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done = 3449 pf_socket_lookup(direction, pd), 1)) && 3450 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], 3451 pd->lookup.gid)) 3452 r = TAILQ_NEXT(r, entries); 3453 else if (r->prob && 3454 r->prob <= karc4random()) 3455 r = TAILQ_NEXT(r, entries); 3456 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3457 r = TAILQ_NEXT(r, entries); 3458 else if (r->os_fingerprint != PF_OSFP_ANY && 3459 (pd->proto != IPPROTO_TCP || !pf_osfp_match( 3460 pf_osfp_fingerprint(pd, m, off, th), 3461 r->os_fingerprint))) 3462 r = TAILQ_NEXT(r, entries); 3463 else { 3464 if (r->tag) 3465 tag = r->tag; 3466 if (r->rtableid >= 0) 3467 rtableid = r->rtableid; 3468 if (r->anchor == NULL) { 3469 match = 1; 3470 *rm = r; 3471 *am = a; 3472 *rsm = ruleset; 3473 if ((*rm)->quick) 3474 break; 3475 r = TAILQ_NEXT(r, entries); 3476 } else 3477 pf_step_into_anchor(&asd, &ruleset, 3478 PF_RULESET_FILTER, &r, &a, &match); 3479 } 3480 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3481 PF_RULESET_FILTER, &r, &a, &match)) 3482 break; 3483 } 3484 r = *rm; 3485 a = *am; 3486 ruleset = *rsm; 3487 3488 REASON_SET(&reason, PFRES_MATCH); 3489 3490 if (r->log || (nr != NULL && nr->log)) { 3491 if (rewrite) 3492 m_copyback(m, off, hdrlen, pd->hdr.any); 3493 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr, 3494 a, ruleset, pd); 3495 } 3496 3497 if ((r->action == PF_DROP) && 3498 ((r->rule_flag & PFRULE_RETURNRST) || 3499 (r->rule_flag & PFRULE_RETURNICMP) || 3500 (r->rule_flag & PFRULE_RETURN))) { 3501 /* undo NAT changes, if they have taken place */ 3502 if (nr != NULL) { 3503 PF_ACPY(saddr, &sk->addr[pd->sidx], af); 3504 PF_ACPY(daddr, &sk->addr[pd->didx], af); 3505 if (pd->sport) 3506 *pd->sport = sk->port[pd->sidx]; 3507 if (pd->dport) 3508 *pd->dport = sk->port[pd->didx]; 3509 if (pd->proto_sum) 3510 *pd->proto_sum = bproto_sum; 3511 if (pd->ip_sum) 3512 *pd->ip_sum = bip_sum; 3513 m_copyback(m, off, hdrlen, pd->hdr.any); 3514 } 3515 if (pd->proto == IPPROTO_TCP && 3516 ((r->rule_flag & PFRULE_RETURNRST) || 3517 (r->rule_flag & PFRULE_RETURN)) && 3518 !(th->th_flags & TH_RST)) { 3519 u_int32_t ack = ntohl(th->th_seq) + pd->p_len; 3520 int len = 0; 3521 struct ip *h4; 3522 #ifdef INET6 3523 struct ip6_hdr *h6; 3524 #endif 3525 switch (af) { 3526 case AF_INET: 3527 h4 = mtod(m, struct ip *); 3528 len = h4->ip_len - off; 3529 break; 3530 #ifdef INET6 3531 case AF_INET6: 3532 h6 = mtod(m, struct ip6_hdr *); 3533 len = h6->ip6_plen - (off - sizeof(*h6)); 3534 break; 3535 #endif 3536 } 3537 3538 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af)) 3539 REASON_SET(&reason, PFRES_PROTCKSUM); 3540 else { 3541 if (th->th_flags & TH_SYN) 3542 ack++; 3543 if (th->th_flags & TH_FIN) 3544 ack++; 3545 pf_send_tcp(r, af, pd->dst, 3546 pd->src, th->th_dport, th->th_sport, 3547 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, 3548 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp); 3549 } 3550 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET && 3551 r->return_icmp) 3552 pf_send_icmp(m, r->return_icmp >> 8, 3553 r->return_icmp & 255, af, r); 3554 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && 3555 r->return_icmp6) 3556 pf_send_icmp(m, r->return_icmp6 >> 8, 3557 r->return_icmp6 & 255, af, r); 3558 } 3559 3560 if (r->action == PF_DROP) 3561 goto cleanup; 3562 3563 if (pf_tag_packet(m, tag, rtableid)) { 3564 REASON_SET(&reason, PFRES_MEMORY); 3565 goto cleanup; 3566 } 3567 3568 if (!state_icmp && (r->keep_state || nr != NULL || 3569 (pd->flags & PFDESC_TCP_NORM))) { 3570 int action; 3571 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m, 3572 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum, 3573 bip_sum, hdrlen); 3574 if (action != PF_PASS) 3575 return (action); 3576 } 3577 3578 /* copy back packet headers if we performed NAT operations */ 3579 if (rewrite) 3580 m_copyback(m, off, hdrlen, pd->hdr.any); 3581 3582 return (PF_PASS); 3583 3584 cleanup: 3585 if (sk != NULL) 3586 kfree(sk, M_PFSTATEKEYPL); 3587 if (nk != NULL) 3588 kfree(nk, M_PFSTATEKEYPL); 3589 return (PF_DROP); 3590 } 3591 3592 static __inline int 3593 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, 3594 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw, 3595 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk, 3596 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite, 3597 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum, 3598 u_int16_t bip_sum, int hdrlen) 3599 { 3600 struct pf_state *s = NULL; 3601 struct pf_src_node *sn = NULL; 3602 struct tcphdr *th = pd->hdr.tcp; 3603 u_int16_t mss = tcp_mssdflt; 3604 u_short reason; 3605 3606 /* check maximums */ 3607 if (r->max_states && (r->states_cur >= r->max_states)) { 3608 pf_status.lcounters[LCNT_STATES]++; 3609 REASON_SET(&reason, PFRES_MAXSTATES); 3610 return (PF_DROP); 3611 } 3612 /* src node for filter rule */ 3613 if ((r->rule_flag & PFRULE_SRCTRACK || 3614 r->rpool.opts & PF_POOL_STICKYADDR) && 3615 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) { 3616 REASON_SET(&reason, PFRES_SRCLIMIT); 3617 goto csfailed; 3618 } 3619 /* src node for translation rule */ 3620 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) && 3621 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) { 3622 REASON_SET(&reason, PFRES_SRCLIMIT); 3623 goto csfailed; 3624 } 3625 s = kmalloc(sizeof(struct pf_state), M_PFSTATEPL, M_NOWAIT|M_ZERO); 3626 if (s == NULL) { 3627 REASON_SET(&reason, PFRES_MEMORY); 3628 goto csfailed; 3629 } 3630 s->id = 0; /* XXX Do we really need that? not in OpenBSD */ 3631 s->creatorid = 0; 3632 s->rule.ptr = r; 3633 s->nat_rule.ptr = nr; 3634 s->anchor.ptr = a; 3635 STATE_INC_COUNTERS(s); 3636 if (r->allow_opts) 3637 s->state_flags |= PFSTATE_ALLOWOPTS; 3638 if (r->rule_flag & PFRULE_STATESLOPPY) 3639 s->state_flags |= PFSTATE_SLOPPY; 3640 s->log = r->log & PF_LOG_ALL; 3641 if (nr != NULL) 3642 s->log |= nr->log & PF_LOG_ALL; 3643 switch (pd->proto) { 3644 case IPPROTO_TCP: 3645 s->src.seqlo = ntohl(th->th_seq); 3646 s->src.seqhi = s->src.seqlo + pd->p_len + 1; 3647 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && 3648 r->keep_state == PF_STATE_MODULATE) { 3649 /* Generate sequence number modulator */ 3650 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 3651 0) 3652 s->src.seqdiff = 1; 3653 pf_change_a(&th->th_seq, &th->th_sum, 3654 htonl(s->src.seqlo + s->src.seqdiff), 0); 3655 *rewrite = 1; 3656 } else 3657 s->src.seqdiff = 0; 3658 if (th->th_flags & TH_SYN) { 3659 s->src.seqhi++; 3660 s->src.wscale = pf_get_wscale(m, off, 3661 th->th_off, pd->af); 3662 } 3663 s->src.max_win = MAX(ntohs(th->th_win), 1); 3664 if (s->src.wscale & PF_WSCALE_MASK) { 3665 /* Remove scale factor from initial window */ 3666 int win = s->src.max_win; 3667 win += 1 << (s->src.wscale & PF_WSCALE_MASK); 3668 s->src.max_win = (win - 1) >> 3669 (s->src.wscale & PF_WSCALE_MASK); 3670 } 3671 if (th->th_flags & TH_FIN) 3672 s->src.seqhi++; 3673 s->dst.seqhi = 1; 3674 s->dst.max_win = 1; 3675 s->src.state = TCPS_SYN_SENT; 3676 s->dst.state = TCPS_CLOSED; 3677 s->timeout = PFTM_TCP_FIRST_PACKET; 3678 break; 3679 case IPPROTO_UDP: 3680 s->src.state = PFUDPS_SINGLE; 3681 s->dst.state = PFUDPS_NO_TRAFFIC; 3682 s->timeout = PFTM_UDP_FIRST_PACKET; 3683 break; 3684 case IPPROTO_ICMP: 3685 #ifdef INET6 3686 case IPPROTO_ICMPV6: 3687 #endif 3688 s->timeout = PFTM_ICMP_FIRST_PACKET; 3689 break; 3690 default: 3691 s->src.state = PFOTHERS_SINGLE; 3692 s->dst.state = PFOTHERS_NO_TRAFFIC; 3693 s->timeout = PFTM_OTHER_FIRST_PACKET; 3694 } 3695 3696 s->creation = time_second; 3697 s->expire = time_second; 3698 3699 if (sn != NULL) { 3700 s->src_node = sn; 3701 s->src_node->states++; 3702 } 3703 if (nsn != NULL) { 3704 /* XXX We only modify one side for now. */ 3705 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af); 3706 s->nat_src_node = nsn; 3707 s->nat_src_node->states++; 3708 } 3709 if (pd->proto == IPPROTO_TCP) { 3710 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m, 3711 off, pd, th, &s->src, &s->dst)) { 3712 REASON_SET(&reason, PFRES_MEMORY); 3713 pf_src_tree_remove_state(s); 3714 STATE_DEC_COUNTERS(s); 3715 kfree(s, M_PFSTATEPL); 3716 return (PF_DROP); 3717 } 3718 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && 3719 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s, 3720 &s->src, &s->dst, rewrite)) { 3721 /* This really shouldn't happen!!! */ 3722 DPFPRINTF(PF_DEBUG_URGENT, 3723 ("pf_normalize_tcp_stateful failed on first pkt")); 3724 pf_normalize_tcp_cleanup(s); 3725 pf_src_tree_remove_state(s); 3726 STATE_DEC_COUNTERS(s); 3727 kfree(s, M_PFSTATEPL); 3728 return (PF_DROP); 3729 } 3730 } 3731 s->direction = pd->dir; 3732 3733 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk, 3734 pd->src, pd->dst, sport, dport)) 3735 goto csfailed; 3736 3737 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) { 3738 if (pd->proto == IPPROTO_TCP) 3739 pf_normalize_tcp_cleanup(s); 3740 REASON_SET(&reason, PFRES_STATEINS); 3741 pf_src_tree_remove_state(s); 3742 STATE_DEC_COUNTERS(s); 3743 kfree(s, M_PFSTATEPL); 3744 return (PF_DROP); 3745 } else 3746 *sm = s; 3747 3748 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */ 3749 if (tag > 0) { 3750 pf_tag_ref(tag); 3751 s->tag = tag; 3752 } 3753 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == 3754 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { 3755 s->src.state = PF_TCPS_PROXY_SRC; 3756 /* undo NAT changes, if they have taken place */ 3757 if (nr != NULL) { 3758 struct pf_state_key *skt = s->key[PF_SK_WIRE]; 3759 if (pd->dir == PF_OUT) 3760 skt = s->key[PF_SK_STACK]; 3761 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); 3762 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); 3763 if (pd->sport) 3764 *pd->sport = skt->port[pd->sidx]; 3765 if (pd->dport) 3766 *pd->dport = skt->port[pd->didx]; 3767 if (pd->proto_sum) 3768 *pd->proto_sum = bproto_sum; 3769 if (pd->ip_sum) 3770 *pd->ip_sum = bip_sum; 3771 m_copyback(m, off, hdrlen, pd->hdr.any); 3772 } 3773 s->src.seqhi = htonl(karc4random()); 3774 /* Find mss option */ 3775 mss = pf_get_mss(m, off, th->th_off, pd->af); 3776 mss = pf_calc_mss(pd->src, pd->af, mss); 3777 mss = pf_calc_mss(pd->dst, pd->af, mss); 3778 s->src.mss = mss; 3779 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, 3780 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, 3781 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL); 3782 REASON_SET(&reason, PFRES_SYNPROXY); 3783 return (PF_SYNPROXY_DROP); 3784 } 3785 3786 return (PF_PASS); 3787 3788 csfailed: 3789 if (sk != NULL) 3790 kfree(sk, M_PFSTATEKEYPL); 3791 if (nk != NULL) 3792 kfree(nk, M_PFSTATEKEYPL); 3793 3794 if (sn != NULL && sn->states == 0 && sn->expire == 0) { 3795 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn); 3796 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 3797 pf_status.src_nodes--; 3798 kfree(sn, M_PFSRCTREEPL); 3799 } 3800 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) { 3801 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn); 3802 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 3803 pf_status.src_nodes--; 3804 kfree(nsn, M_PFSRCTREEPL); 3805 } 3806 return (PF_DROP); 3807 } 3808 3809 int 3810 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, 3811 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am, 3812 struct pf_ruleset **rsm) 3813 { 3814 struct pf_rule *r, *a = NULL; 3815 struct pf_ruleset *ruleset = NULL; 3816 sa_family_t af = pd->af; 3817 u_short reason; 3818 int tag = -1; 3819 int asd = 0; 3820 int match = 0; 3821 3822 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3823 while (r != NULL) { 3824 r->evaluations++; 3825 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3826 r = r->skip[PF_SKIP_IFP].ptr; 3827 else if (r->direction && r->direction != direction) 3828 r = r->skip[PF_SKIP_DIR].ptr; 3829 else if (r->af && r->af != af) 3830 r = r->skip[PF_SKIP_AF].ptr; 3831 else if (r->proto && r->proto != pd->proto) 3832 r = r->skip[PF_SKIP_PROTO].ptr; 3833 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 3834 r->src.neg, kif)) 3835 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3836 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 3837 r->dst.neg, NULL)) 3838 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3839 else if (r->tos && !(r->tos == pd->tos)) 3840 r = TAILQ_NEXT(r, entries); 3841 else if (r->os_fingerprint != PF_OSFP_ANY) 3842 r = TAILQ_NEXT(r, entries); 3843 else if (pd->proto == IPPROTO_UDP && 3844 (r->src.port_op || r->dst.port_op)) 3845 r = TAILQ_NEXT(r, entries); 3846 else if (pd->proto == IPPROTO_TCP && 3847 (r->src.port_op || r->dst.port_op || r->flagset)) 3848 r = TAILQ_NEXT(r, entries); 3849 else if ((pd->proto == IPPROTO_ICMP || 3850 pd->proto == IPPROTO_ICMPV6) && 3851 (r->type || r->code)) 3852 r = TAILQ_NEXT(r, entries); 3853 else if (r->prob && r->prob <= karc4random()) 3854 r = TAILQ_NEXT(r, entries); 3855 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3856 r = TAILQ_NEXT(r, entries); 3857 else { 3858 if (r->anchor == NULL) { 3859 match = 1; 3860 *rm = r; 3861 *am = a; 3862 *rsm = ruleset; 3863 if ((*rm)->quick) 3864 break; 3865 r = TAILQ_NEXT(r, entries); 3866 } else 3867 pf_step_into_anchor(&asd, &ruleset, 3868 PF_RULESET_FILTER, &r, &a, &match); 3869 } 3870 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3871 PF_RULESET_FILTER, &r, &a, &match)) 3872 break; 3873 } 3874 r = *rm; 3875 a = *am; 3876 ruleset = *rsm; 3877 3878 REASON_SET(&reason, PFRES_MATCH); 3879 3880 if (r->log) 3881 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset, 3882 pd); 3883 3884 if (r->action != PF_PASS) 3885 return (PF_DROP); 3886 3887 if (pf_tag_packet(m, tag, -1)) { 3888 REASON_SET(&reason, PFRES_MEMORY); 3889 return (PF_DROP); 3890 } 3891 3892 return (PF_PASS); 3893 } 3894 3895 int 3896 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst, 3897 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off, 3898 struct pf_pdesc *pd, u_short *reason, int *copyback) 3899 { 3900 struct tcphdr *th = pd->hdr.tcp; 3901 u_int16_t win = ntohs(th->th_win); 3902 u_int32_t ack, end, seq, orig_seq; 3903 u_int8_t sws, dws; 3904 int ackskew; 3905 3906 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { 3907 sws = src->wscale & PF_WSCALE_MASK; 3908 dws = dst->wscale & PF_WSCALE_MASK; 3909 } else 3910 sws = dws = 0; 3911 3912 /* 3913 * Sequence tracking algorithm from Guido van Rooij's paper: 3914 * http://www.madison-gurkha.com/publications/tcp_filtering/ 3915 * tcp_filtering.ps 3916 */ 3917 3918 orig_seq = seq = ntohl(th->th_seq); 3919 if (src->seqlo == 0) { 3920 /* First packet from this end. Set its state */ 3921 3922 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) && 3923 src->scrub == NULL) { 3924 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) { 3925 REASON_SET(reason, PFRES_MEMORY); 3926 return (PF_DROP); 3927 } 3928 } 3929 3930 /* Deferred generation of sequence number modulator */ 3931 if (dst->seqdiff && !src->seqdiff) { 3932 /* use random iss for the TCP server */ 3933 while ((src->seqdiff = karc4random() - seq) == 0) 3934 ; 3935 ack = ntohl(th->th_ack) - dst->seqdiff; 3936 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 3937 src->seqdiff), 0); 3938 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 3939 *copyback = 1; 3940 } else { 3941 ack = ntohl(th->th_ack); 3942 } 3943 3944 end = seq + pd->p_len; 3945 if (th->th_flags & TH_SYN) { 3946 end++; 3947 (*state)->sync_flags |= PFSTATE_GOT_SYN2; 3948 if (dst->wscale & PF_WSCALE_FLAG) { 3949 src->wscale = pf_get_wscale(m, off, th->th_off, 3950 pd->af); 3951 if (src->wscale & PF_WSCALE_FLAG) { 3952 /* Remove scale factor from initial 3953 * window */ 3954 sws = src->wscale & PF_WSCALE_MASK; 3955 win = ((u_int32_t)win + (1 << sws) - 1) 3956 >> sws; 3957 dws = dst->wscale & PF_WSCALE_MASK; 3958 } else { 3959 /* fixup other window */ 3960 dst->max_win <<= dst->wscale & 3961 PF_WSCALE_MASK; 3962 /* in case of a retrans SYN|ACK */ 3963 dst->wscale = 0; 3964 } 3965 } 3966 } 3967 if (th->th_flags & TH_FIN) 3968 end++; 3969 3970 src->seqlo = seq; 3971 if (src->state < TCPS_SYN_SENT) 3972 src->state = TCPS_SYN_SENT; 3973 3974 /* 3975 * May need to slide the window (seqhi may have been set by 3976 * the crappy stack check or if we picked up the connection 3977 * after establishment) 3978 */ 3979 if (src->seqhi == 1 || 3980 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)) 3981 src->seqhi = end + MAX(1, dst->max_win << dws); 3982 if (win > src->max_win) 3983 src->max_win = win; 3984 3985 } else { 3986 ack = ntohl(th->th_ack) - dst->seqdiff; 3987 if (src->seqdiff) { 3988 /* Modulate sequence numbers */ 3989 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 3990 src->seqdiff), 0); 3991 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 3992 *copyback = 1; 3993 } 3994 end = seq + pd->p_len; 3995 if (th->th_flags & TH_SYN) 3996 end++; 3997 if (th->th_flags & TH_FIN) 3998 end++; 3999 } 4000 4001 if ((th->th_flags & TH_ACK) == 0) { 4002 /* Let it pass through the ack skew check */ 4003 ack = dst->seqlo; 4004 } else if ((ack == 0 && 4005 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || 4006 /* broken tcp stacks do not set ack */ 4007 (dst->state < TCPS_SYN_SENT)) { 4008 /* 4009 * Many stacks (ours included) will set the ACK number in an 4010 * FIN|ACK if the SYN times out -- no sequence to ACK. 4011 */ 4012 ack = dst->seqlo; 4013 } 4014 4015 if (seq == end) { 4016 /* Ease sequencing restrictions on no data packets */ 4017 seq = src->seqlo; 4018 end = seq; 4019 } 4020 4021 ackskew = dst->seqlo - ack; 4022 4023 4024 /* 4025 * Need to demodulate the sequence numbers in any TCP SACK options 4026 * (Selective ACK). We could optionally validate the SACK values 4027 * against the current ACK window, either forwards or backwards, but 4028 * I'm not confident that SACK has been implemented properly 4029 * everywhere. It wouldn't surprise me if several stacks accidently 4030 * SACK too far backwards of previously ACKed data. There really aren't 4031 * any security implications of bad SACKing unless the target stack 4032 * doesn't validate the option length correctly. Someone trying to 4033 * spoof into a TCP connection won't bother blindly sending SACK 4034 * options anyway. 4035 */ 4036 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { 4037 if (pf_modulate_sack(m, off, pd, th, dst)) 4038 *copyback = 1; 4039 } 4040 4041 4042 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 4043 if (SEQ_GEQ(src->seqhi, end) && 4044 /* Last octet inside other's window space */ 4045 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && 4046 /* Retrans: not more than one window back */ 4047 (ackskew >= -MAXACKWINDOW) && 4048 /* Acking not more than one reassembled fragment backwards */ 4049 (ackskew <= (MAXACKWINDOW << sws)) && 4050 /* Acking not more than one window forward */ 4051 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || 4052 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || 4053 (pd->flags & PFDESC_IP_REAS) == 0)) { 4054 /* Require an exact/+1 sequence match on resets when possible */ 4055 4056 if (dst->scrub || src->scrub) { 4057 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4058 *state, src, dst, copyback)) 4059 return (PF_DROP); 4060 } 4061 4062 /* update max window */ 4063 if (src->max_win < win) 4064 src->max_win = win; 4065 /* synchronize sequencing */ 4066 if (SEQ_GT(end, src->seqlo)) 4067 src->seqlo = end; 4068 /* slide the window of what the other end can send */ 4069 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4070 dst->seqhi = ack + MAX((win << sws), 1); 4071 4072 4073 /* update states */ 4074 if (th->th_flags & TH_SYN) 4075 if (src->state < TCPS_SYN_SENT) 4076 src->state = TCPS_SYN_SENT; 4077 if (th->th_flags & TH_FIN) 4078 if (src->state < TCPS_CLOSING) 4079 src->state = TCPS_CLOSING; 4080 if (th->th_flags & TH_ACK) { 4081 if (dst->state == TCPS_SYN_SENT) { 4082 dst->state = TCPS_ESTABLISHED; 4083 if (src->state == TCPS_ESTABLISHED && 4084 (*state)->src_node != NULL && 4085 pf_src_connlimit(state)) { 4086 REASON_SET(reason, PFRES_SRCLIMIT); 4087 return (PF_DROP); 4088 } 4089 } else if (dst->state == TCPS_CLOSING) 4090 dst->state = TCPS_FIN_WAIT_2; 4091 } 4092 if (th->th_flags & TH_RST) 4093 src->state = dst->state = TCPS_TIME_WAIT; 4094 4095 /* update expire time */ 4096 (*state)->expire = time_second; 4097 if (src->state >= TCPS_FIN_WAIT_2 && 4098 dst->state >= TCPS_FIN_WAIT_2) 4099 (*state)->timeout = PFTM_TCP_CLOSED; 4100 else if (src->state >= TCPS_CLOSING && 4101 dst->state >= TCPS_CLOSING) 4102 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4103 else if (src->state < TCPS_ESTABLISHED || 4104 dst->state < TCPS_ESTABLISHED) 4105 (*state)->timeout = PFTM_TCP_OPENING; 4106 else if (src->state >= TCPS_CLOSING || 4107 dst->state >= TCPS_CLOSING) 4108 (*state)->timeout = PFTM_TCP_CLOSING; 4109 else 4110 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4111 4112 /* Fall through to PASS packet */ 4113 4114 } else if ((dst->state < TCPS_SYN_SENT || 4115 dst->state >= TCPS_FIN_WAIT_2 || 4116 src->state >= TCPS_FIN_WAIT_2) && 4117 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 4118 /* Within a window forward of the originating packet */ 4119 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 4120 /* Within a window backward of the originating packet */ 4121 4122 /* 4123 * This currently handles three situations: 4124 * 1) Stupid stacks will shotgun SYNs before their peer 4125 * replies. 4126 * 2) When PF catches an already established stream (the 4127 * firewall rebooted, the state table was flushed, routes 4128 * changed...) 4129 * 3) Packets get funky immediately after the connection 4130 * closes (this should catch Solaris spurious ACK|FINs 4131 * that web servers like to spew after a close) 4132 * 4133 * This must be a little more careful than the above code 4134 * since packet floods will also be caught here. We don't 4135 * update the TTL here to mitigate the damage of a packet 4136 * flood and so the same code can handle awkward establishment 4137 * and a loosened connection close. 4138 * In the establishment case, a correct peer response will 4139 * validate the connection, go through the normal state code 4140 * and keep updating the state TTL. 4141 */ 4142 4143 if (pf_status.debug >= PF_DEBUG_MISC) { 4144 kprintf("pf: loose state match: "); 4145 pf_print_state(*state); 4146 pf_print_flags(th->th_flags); 4147 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 4148 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len, 4149 ackskew, (unsigned long long)(*state)->packets[0], 4150 (unsigned long long)(*state)->packets[1], 4151 pd->dir == PF_IN ? "in" : "out", 4152 pd->dir == (*state)->direction ? "fwd" : "rev"); 4153 } 4154 4155 if (dst->scrub || src->scrub) { 4156 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4157 *state, src, dst, copyback)) 4158 return (PF_DROP); 4159 } 4160 4161 /* update max window */ 4162 if (src->max_win < win) 4163 src->max_win = win; 4164 /* synchronize sequencing */ 4165 if (SEQ_GT(end, src->seqlo)) 4166 src->seqlo = end; 4167 /* slide the window of what the other end can send */ 4168 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4169 dst->seqhi = ack + MAX((win << sws), 1); 4170 4171 /* 4172 * Cannot set dst->seqhi here since this could be a shotgunned 4173 * SYN and not an already established connection. 4174 */ 4175 4176 if (th->th_flags & TH_FIN) 4177 if (src->state < TCPS_CLOSING) 4178 src->state = TCPS_CLOSING; 4179 if (th->th_flags & TH_RST) 4180 src->state = dst->state = TCPS_TIME_WAIT; 4181 4182 /* Fall through to PASS packet */ 4183 4184 } else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY || 4185 ((*state)->pickup_mode == PF_PICKUPS_ENABLED && 4186 ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) != 4187 PFSTATE_GOT_SYN_MASK)) { 4188 /* 4189 * If pickup mode is hash only, do not fail on sequence checks. 4190 * 4191 * If pickup mode is enabled and we did not see the SYN in 4192 * both direction, do not fail on sequence checks because 4193 * we do not have complete information on window scale. 4194 * 4195 * Adjust expiration and fall through to PASS packet. 4196 * XXX Add a FIN check to reduce timeout? 4197 */ 4198 (*state)->expire = time_second; 4199 } else { 4200 /* 4201 * Failure processing 4202 */ 4203 if ((*state)->dst.state == TCPS_SYN_SENT && 4204 (*state)->src.state == TCPS_SYN_SENT) { 4205 /* Send RST for state mismatches during handshake */ 4206 if (!(th->th_flags & TH_RST)) 4207 pf_send_tcp((*state)->rule.ptr, pd->af, 4208 pd->dst, pd->src, th->th_dport, 4209 th->th_sport, ntohl(th->th_ack), 0, 4210 TH_RST, 0, 0, 4211 (*state)->rule.ptr->return_ttl, 1, 0, 4212 pd->eh, kif->pfik_ifp); 4213 src->seqlo = 0; 4214 src->seqhi = 1; 4215 src->max_win = 1; 4216 } else if (pf_status.debug >= PF_DEBUG_MISC) { 4217 kprintf("pf: BAD state: "); 4218 pf_print_state(*state); 4219 pf_print_flags(th->th_flags); 4220 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 4221 "pkts=%llu:%llu dir=%s,%s\n", 4222 seq, orig_seq, ack, pd->p_len, ackskew, 4223 (unsigned long long)(*state)->packets[0], 4224 (unsigned long long)(*state)->packets[1], 4225 pd->dir == PF_IN ? "in" : "out", 4226 pd->dir == (*state)->direction ? "fwd" : "rev"); 4227 kprintf("pf: State failure on: %c %c %c %c | %c %c\n", 4228 SEQ_GEQ(src->seqhi, end) ? ' ' : '1', 4229 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? 4230 ' ': '2', 4231 (ackskew >= -MAXACKWINDOW) ? ' ' : '3', 4232 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', 4233 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', 4234 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6'); 4235 } 4236 REASON_SET(reason, PFRES_BADSTATE); 4237 return (PF_DROP); 4238 } 4239 4240 return (PF_PASS); 4241 } 4242 4243 int 4244 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst, 4245 struct pf_state **state, struct pf_pdesc *pd, u_short *reason) 4246 { 4247 struct tcphdr *th = pd->hdr.tcp; 4248 4249 if (th->th_flags & TH_SYN) 4250 if (src->state < TCPS_SYN_SENT) 4251 src->state = TCPS_SYN_SENT; 4252 if (th->th_flags & TH_FIN) 4253 if (src->state < TCPS_CLOSING) 4254 src->state = TCPS_CLOSING; 4255 if (th->th_flags & TH_ACK) { 4256 if (dst->state == TCPS_SYN_SENT) { 4257 dst->state = TCPS_ESTABLISHED; 4258 if (src->state == TCPS_ESTABLISHED && 4259 (*state)->src_node != NULL && 4260 pf_src_connlimit(state)) { 4261 REASON_SET(reason, PFRES_SRCLIMIT); 4262 return (PF_DROP); 4263 } 4264 } else if (dst->state == TCPS_CLOSING) { 4265 dst->state = TCPS_FIN_WAIT_2; 4266 } else if (src->state == TCPS_SYN_SENT && 4267 dst->state < TCPS_SYN_SENT) { 4268 /* 4269 * Handle a special sloppy case where we only see one 4270 * half of the connection. If there is a ACK after 4271 * the initial SYN without ever seeing a packet from 4272 * the destination, set the connection to established. 4273 */ 4274 dst->state = src->state = TCPS_ESTABLISHED; 4275 if ((*state)->src_node != NULL && 4276 pf_src_connlimit(state)) { 4277 REASON_SET(reason, PFRES_SRCLIMIT); 4278 return (PF_DROP); 4279 } 4280 } else if (src->state == TCPS_CLOSING && 4281 dst->state == TCPS_ESTABLISHED && 4282 dst->seqlo == 0) { 4283 /* 4284 * Handle the closing of half connections where we 4285 * don't see the full bidirectional FIN/ACK+ACK 4286 * handshake. 4287 */ 4288 dst->state = TCPS_CLOSING; 4289 } 4290 } 4291 if (th->th_flags & TH_RST) 4292 src->state = dst->state = TCPS_TIME_WAIT; 4293 4294 /* update expire time */ 4295 (*state)->expire = time_second; 4296 if (src->state >= TCPS_FIN_WAIT_2 && 4297 dst->state >= TCPS_FIN_WAIT_2) 4298 (*state)->timeout = PFTM_TCP_CLOSED; 4299 else if (src->state >= TCPS_CLOSING && 4300 dst->state >= TCPS_CLOSING) 4301 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4302 else if (src->state < TCPS_ESTABLISHED || 4303 dst->state < TCPS_ESTABLISHED) 4304 (*state)->timeout = PFTM_TCP_OPENING; 4305 else if (src->state >= TCPS_CLOSING || 4306 dst->state >= TCPS_CLOSING) 4307 (*state)->timeout = PFTM_TCP_CLOSING; 4308 else 4309 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4310 4311 return (PF_PASS); 4312 } 4313 4314 int 4315 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, 4316 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4317 u_short *reason) 4318 { 4319 struct pf_state_key_cmp key; 4320 struct tcphdr *th = pd->hdr.tcp; 4321 int copyback = 0; 4322 struct pf_state_peer *src, *dst; 4323 struct pf_state_key *sk; 4324 4325 key.af = pd->af; 4326 key.proto = IPPROTO_TCP; 4327 if (direction == PF_IN) { /* wire side, straight */ 4328 PF_ACPY(&key.addr[0], pd->src, key.af); 4329 PF_ACPY(&key.addr[1], pd->dst, key.af); 4330 key.port[0] = th->th_sport; 4331 key.port[1] = th->th_dport; 4332 } else { /* stack side, reverse */ 4333 PF_ACPY(&key.addr[1], pd->src, key.af); 4334 PF_ACPY(&key.addr[0], pd->dst, key.af); 4335 key.port[1] = th->th_sport; 4336 key.port[0] = th->th_dport; 4337 } 4338 4339 STATE_LOOKUP(kif, &key, direction, *state, m); 4340 4341 if (direction == (*state)->direction) { 4342 src = &(*state)->src; 4343 dst = &(*state)->dst; 4344 } else { 4345 src = &(*state)->dst; 4346 dst = &(*state)->src; 4347 } 4348 4349 sk = (*state)->key[pd->didx]; 4350 4351 if ((*state)->src.state == PF_TCPS_PROXY_SRC) { 4352 if (direction != (*state)->direction) { 4353 REASON_SET(reason, PFRES_SYNPROXY); 4354 return (PF_SYNPROXY_DROP); 4355 } 4356 if (th->th_flags & TH_SYN) { 4357 if (ntohl(th->th_seq) != (*state)->src.seqlo) { 4358 REASON_SET(reason, PFRES_SYNPROXY); 4359 return (PF_DROP); 4360 } 4361 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4362 pd->src, th->th_dport, th->th_sport, 4363 (*state)->src.seqhi, ntohl(th->th_seq) + 1, 4364 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 4365 0, NULL, NULL); 4366 REASON_SET(reason, PFRES_SYNPROXY); 4367 return (PF_SYNPROXY_DROP); 4368 } else if (!(th->th_flags & TH_ACK) || 4369 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4370 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4371 REASON_SET(reason, PFRES_SYNPROXY); 4372 return (PF_DROP); 4373 } else if ((*state)->src_node != NULL && 4374 pf_src_connlimit(state)) { 4375 REASON_SET(reason, PFRES_SRCLIMIT); 4376 return (PF_DROP); 4377 } else 4378 (*state)->src.state = PF_TCPS_PROXY_DST; 4379 } 4380 if ((*state)->src.state == PF_TCPS_PROXY_DST) { 4381 if (direction == (*state)->direction) { 4382 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || 4383 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4384 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4385 REASON_SET(reason, PFRES_SYNPROXY); 4386 return (PF_DROP); 4387 } 4388 (*state)->src.max_win = MAX(ntohs(th->th_win), 1); 4389 if ((*state)->dst.seqhi == 1) 4390 (*state)->dst.seqhi = htonl(karc4random()); 4391 pf_send_tcp((*state)->rule.ptr, pd->af, 4392 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4393 sk->port[pd->sidx], sk->port[pd->didx], 4394 (*state)->dst.seqhi, 0, TH_SYN, 0, 4395 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL); 4396 REASON_SET(reason, PFRES_SYNPROXY); 4397 return (PF_SYNPROXY_DROP); 4398 } else if (((th->th_flags & (TH_SYN|TH_ACK)) != 4399 (TH_SYN|TH_ACK)) || 4400 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { 4401 REASON_SET(reason, PFRES_SYNPROXY); 4402 return (PF_DROP); 4403 } else { 4404 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); 4405 (*state)->dst.seqlo = ntohl(th->th_seq); 4406 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4407 pd->src, th->th_dport, th->th_sport, 4408 ntohl(th->th_ack), ntohl(th->th_seq) + 1, 4409 TH_ACK, (*state)->src.max_win, 0, 0, 0, 4410 (*state)->tag, NULL, NULL); 4411 pf_send_tcp((*state)->rule.ptr, pd->af, 4412 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4413 sk->port[pd->sidx], sk->port[pd->didx], 4414 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, 4415 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 4416 0, NULL, NULL); 4417 (*state)->src.seqdiff = (*state)->dst.seqhi - 4418 (*state)->src.seqlo; 4419 (*state)->dst.seqdiff = (*state)->src.seqhi - 4420 (*state)->dst.seqlo; 4421 (*state)->src.seqhi = (*state)->src.seqlo + 4422 (*state)->dst.max_win; 4423 (*state)->dst.seqhi = (*state)->dst.seqlo + 4424 (*state)->src.max_win; 4425 (*state)->src.wscale = (*state)->dst.wscale = 0; 4426 (*state)->src.state = (*state)->dst.state = 4427 TCPS_ESTABLISHED; 4428 REASON_SET(reason, PFRES_SYNPROXY); 4429 return (PF_SYNPROXY_DROP); 4430 } 4431 } 4432 4433 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) && 4434 dst->state >= TCPS_FIN_WAIT_2 && 4435 src->state >= TCPS_FIN_WAIT_2) { 4436 if (pf_status.debug >= PF_DEBUG_MISC) { 4437 kprintf("pf: state reuse "); 4438 pf_print_state(*state); 4439 pf_print_flags(th->th_flags); 4440 kprintf("\n"); 4441 } 4442 /* XXX make sure it's the same direction ?? */ 4443 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 4444 pf_unlink_state(*state); 4445 *state = NULL; 4446 return (PF_DROP); 4447 } 4448 4449 if ((*state)->state_flags & PFSTATE_SLOPPY) { 4450 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP) 4451 return (PF_DROP); 4452 } else { 4453 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason, 4454 ©back) == PF_DROP) 4455 return (PF_DROP); 4456 } 4457 4458 /* translate source/destination address, if necessary */ 4459 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4460 struct pf_state_key *nk = (*state)->key[pd->didx]; 4461 4462 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4463 nk->port[pd->sidx] != th->th_sport) { 4464 /* 4465 * The translated source address may be completely 4466 * unrelated to the saved link header, make sure 4467 * a bridge doesn't try to use it. 4468 */ 4469 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4470 m->m_flags &= ~M_HASH; 4471 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 4472 &th->th_sum, &nk->addr[pd->sidx], 4473 nk->port[pd->sidx], 0, pd->af); 4474 } 4475 4476 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4477 nk->port[pd->didx] != th->th_dport) { 4478 /* 4479 * If we don't redispatch the packet will go into 4480 * the protocol stack on the wrong cpu for the 4481 * post-translated address. 4482 */ 4483 m->m_flags &= ~M_HASH; 4484 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 4485 &th->th_sum, &nk->addr[pd->didx], 4486 nk->port[pd->didx], 0, pd->af); 4487 } 4488 copyback = 1; 4489 } 4490 4491 /* Copyback sequence modulation or stateful scrub changes if needed */ 4492 if (copyback) 4493 m_copyback(m, off, sizeof(*th), (caddr_t)th); 4494 4495 return (PF_PASS); 4496 } 4497 4498 int 4499 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, 4500 struct mbuf *m, int off, void *h, struct pf_pdesc *pd) 4501 { 4502 struct pf_state_peer *src, *dst; 4503 struct pf_state_key_cmp key; 4504 struct udphdr *uh = pd->hdr.udp; 4505 4506 key.af = pd->af; 4507 key.proto = IPPROTO_UDP; 4508 if (direction == PF_IN) { /* wire side, straight */ 4509 PF_ACPY(&key.addr[0], pd->src, key.af); 4510 PF_ACPY(&key.addr[1], pd->dst, key.af); 4511 key.port[0] = uh->uh_sport; 4512 key.port[1] = uh->uh_dport; 4513 } else { /* stack side, reverse */ 4514 PF_ACPY(&key.addr[1], pd->src, key.af); 4515 PF_ACPY(&key.addr[0], pd->dst, key.af); 4516 key.port[1] = uh->uh_sport; 4517 key.port[0] = uh->uh_dport; 4518 } 4519 4520 STATE_LOOKUP(kif, &key, direction, *state, m); 4521 4522 if (direction == (*state)->direction) { 4523 src = &(*state)->src; 4524 dst = &(*state)->dst; 4525 } else { 4526 src = &(*state)->dst; 4527 dst = &(*state)->src; 4528 } 4529 4530 /* update states */ 4531 if (src->state < PFUDPS_SINGLE) 4532 src->state = PFUDPS_SINGLE; 4533 if (dst->state == PFUDPS_SINGLE) 4534 dst->state = PFUDPS_MULTIPLE; 4535 4536 /* update expire time */ 4537 (*state)->expire = time_second; 4538 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) 4539 (*state)->timeout = PFTM_UDP_MULTIPLE; 4540 else 4541 (*state)->timeout = PFTM_UDP_SINGLE; 4542 4543 /* translate source/destination address, if necessary */ 4544 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4545 struct pf_state_key *nk = (*state)->key[pd->didx]; 4546 4547 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4548 nk->port[pd->sidx] != uh->uh_sport) { 4549 /* 4550 * The translated source address may be completely 4551 * unrelated to the saved link header, make sure 4552 * a bridge doesn't try to use it. 4553 */ 4554 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4555 m->m_flags &= ~M_HASH; 4556 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 4557 &uh->uh_sum, &nk->addr[pd->sidx], 4558 nk->port[pd->sidx], 1, pd->af); 4559 } 4560 4561 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4562 nk->port[pd->didx] != uh->uh_dport) { 4563 /* 4564 * If we don't redispatch the packet will go into 4565 * the protocol stack on the wrong cpu for the 4566 * post-translated address. 4567 */ 4568 m->m_flags &= ~M_HASH; 4569 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 4570 &uh->uh_sum, &nk->addr[pd->didx], 4571 nk->port[pd->didx], 1, pd->af); 4572 } 4573 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 4574 } 4575 4576 return (PF_PASS); 4577 } 4578 4579 int 4580 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, 4581 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason) 4582 { 4583 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 4584 u_int16_t icmpid = 0, *icmpsum; 4585 u_int8_t icmptype; 4586 int state_icmp = 0; 4587 struct pf_state_key_cmp key; 4588 4589 switch (pd->proto) { 4590 #ifdef INET 4591 case IPPROTO_ICMP: 4592 icmptype = pd->hdr.icmp->icmp_type; 4593 icmpid = pd->hdr.icmp->icmp_id; 4594 icmpsum = &pd->hdr.icmp->icmp_cksum; 4595 4596 if (icmptype == ICMP_UNREACH || 4597 icmptype == ICMP_SOURCEQUENCH || 4598 icmptype == ICMP_REDIRECT || 4599 icmptype == ICMP_TIMXCEED || 4600 icmptype == ICMP_PARAMPROB) 4601 state_icmp++; 4602 break; 4603 #endif /* INET */ 4604 #ifdef INET6 4605 case IPPROTO_ICMPV6: 4606 icmptype = pd->hdr.icmp6->icmp6_type; 4607 icmpid = pd->hdr.icmp6->icmp6_id; 4608 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 4609 4610 if (icmptype == ICMP6_DST_UNREACH || 4611 icmptype == ICMP6_PACKET_TOO_BIG || 4612 icmptype == ICMP6_TIME_EXCEEDED || 4613 icmptype == ICMP6_PARAM_PROB) 4614 state_icmp++; 4615 break; 4616 #endif /* INET6 */ 4617 } 4618 4619 if (!state_icmp) { 4620 4621 /* 4622 * ICMP query/reply message not related to a TCP/UDP packet. 4623 * Search for an ICMP state. 4624 */ 4625 key.af = pd->af; 4626 key.proto = pd->proto; 4627 key.port[0] = key.port[1] = icmpid; 4628 if (direction == PF_IN) { /* wire side, straight */ 4629 PF_ACPY(&key.addr[0], pd->src, key.af); 4630 PF_ACPY(&key.addr[1], pd->dst, key.af); 4631 } else { /* stack side, reverse */ 4632 PF_ACPY(&key.addr[1], pd->src, key.af); 4633 PF_ACPY(&key.addr[0], pd->dst, key.af); 4634 } 4635 4636 STATE_LOOKUP(kif, &key, direction, *state, m); 4637 4638 (*state)->expire = time_second; 4639 (*state)->timeout = PFTM_ICMP_ERROR_REPLY; 4640 4641 /* translate source/destination address, if necessary */ 4642 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4643 struct pf_state_key *nk = (*state)->key[pd->didx]; 4644 4645 switch (pd->af) { 4646 #ifdef INET 4647 case AF_INET: 4648 if (PF_ANEQ(pd->src, 4649 &nk->addr[pd->sidx], AF_INET)) 4650 pf_change_a(&saddr->v4.s_addr, 4651 pd->ip_sum, 4652 nk->addr[pd->sidx].v4.s_addr, 0); 4653 4654 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], 4655 AF_INET)) 4656 pf_change_a(&daddr->v4.s_addr, 4657 pd->ip_sum, 4658 nk->addr[pd->didx].v4.s_addr, 0); 4659 4660 if (nk->port[0] != 4661 pd->hdr.icmp->icmp_id) { 4662 pd->hdr.icmp->icmp_cksum = 4663 pf_cksum_fixup( 4664 pd->hdr.icmp->icmp_cksum, icmpid, 4665 nk->port[pd->sidx], 0); 4666 pd->hdr.icmp->icmp_id = 4667 nk->port[pd->sidx]; 4668 } 4669 4670 m_copyback(m, off, ICMP_MINLEN, 4671 (caddr_t)pd->hdr.icmp); 4672 break; 4673 #endif /* INET */ 4674 #ifdef INET6 4675 case AF_INET6: 4676 if (PF_ANEQ(pd->src, 4677 &nk->addr[pd->sidx], AF_INET6)) 4678 pf_change_a6(saddr, 4679 &pd->hdr.icmp6->icmp6_cksum, 4680 &nk->addr[pd->sidx], 0); 4681 4682 if (PF_ANEQ(pd->dst, 4683 &nk->addr[pd->didx], AF_INET6)) 4684 pf_change_a6(daddr, 4685 &pd->hdr.icmp6->icmp6_cksum, 4686 &nk->addr[pd->didx], 0); 4687 4688 m_copyback(m, off, 4689 sizeof(struct icmp6_hdr), 4690 (caddr_t)pd->hdr.icmp6); 4691 break; 4692 #endif /* INET6 */ 4693 } 4694 } 4695 return (PF_PASS); 4696 4697 } else { 4698 /* 4699 * ICMP error message in response to a TCP/UDP packet. 4700 * Extract the inner TCP/UDP header and search for that state. 4701 */ 4702 4703 struct pf_pdesc pd2; 4704 #ifdef INET 4705 struct ip h2; 4706 #endif /* INET */ 4707 #ifdef INET6 4708 struct ip6_hdr h2_6; 4709 int terminal = 0; 4710 #endif /* INET6 */ 4711 int ipoff2; 4712 int off2; 4713 4714 pd2.af = pd->af; 4715 /* Payload packet is from the opposite direction. */ 4716 pd2.sidx = (direction == PF_IN) ? 1 : 0; 4717 pd2.didx = (direction == PF_IN) ? 0 : 1; 4718 switch (pd->af) { 4719 #ifdef INET 4720 case AF_INET: 4721 /* offset of h2 in mbuf chain */ 4722 ipoff2 = off + ICMP_MINLEN; 4723 4724 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 4725 NULL, reason, pd2.af)) { 4726 DPFPRINTF(PF_DEBUG_MISC, 4727 ("pf: ICMP error message too short " 4728 "(ip)\n")); 4729 return (PF_DROP); 4730 } 4731 /* 4732 * ICMP error messages don't refer to non-first 4733 * fragments 4734 */ 4735 if (h2.ip_off & htons(IP_OFFMASK)) { 4736 REASON_SET(reason, PFRES_FRAG); 4737 return (PF_DROP); 4738 } 4739 4740 /* offset of protocol header that follows h2 */ 4741 off2 = ipoff2 + (h2.ip_hl << 2); 4742 4743 pd2.proto = h2.ip_p; 4744 pd2.src = (struct pf_addr *)&h2.ip_src; 4745 pd2.dst = (struct pf_addr *)&h2.ip_dst; 4746 pd2.ip_sum = &h2.ip_sum; 4747 break; 4748 #endif /* INET */ 4749 #ifdef INET6 4750 case AF_INET6: 4751 ipoff2 = off + sizeof(struct icmp6_hdr); 4752 4753 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 4754 NULL, reason, pd2.af)) { 4755 DPFPRINTF(PF_DEBUG_MISC, 4756 ("pf: ICMP error message too short " 4757 "(ip6)\n")); 4758 return (PF_DROP); 4759 } 4760 pd2.proto = h2_6.ip6_nxt; 4761 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 4762 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 4763 pd2.ip_sum = NULL; 4764 off2 = ipoff2 + sizeof(h2_6); 4765 do { 4766 switch (pd2.proto) { 4767 case IPPROTO_FRAGMENT: 4768 /* 4769 * ICMPv6 error messages for 4770 * non-first fragments 4771 */ 4772 REASON_SET(reason, PFRES_FRAG); 4773 return (PF_DROP); 4774 case IPPROTO_AH: 4775 case IPPROTO_HOPOPTS: 4776 case IPPROTO_ROUTING: 4777 case IPPROTO_DSTOPTS: { 4778 /* get next header and header length */ 4779 struct ip6_ext opt6; 4780 4781 if (!pf_pull_hdr(m, off2, &opt6, 4782 sizeof(opt6), NULL, reason, 4783 pd2.af)) { 4784 DPFPRINTF(PF_DEBUG_MISC, 4785 ("pf: ICMPv6 short opt\n")); 4786 return (PF_DROP); 4787 } 4788 if (pd2.proto == IPPROTO_AH) 4789 off2 += (opt6.ip6e_len + 2) * 4; 4790 else 4791 off2 += (opt6.ip6e_len + 1) * 8; 4792 pd2.proto = opt6.ip6e_nxt; 4793 /* goto the next header */ 4794 break; 4795 } 4796 default: 4797 terminal++; 4798 break; 4799 } 4800 } while (!terminal); 4801 break; 4802 #endif /* INET6 */ 4803 default: 4804 DPFPRINTF(PF_DEBUG_MISC, 4805 ("pf: ICMP AF %d unknown (ip6)\n", pd->af)); 4806 return (PF_DROP); 4807 break; 4808 } 4809 4810 switch (pd2.proto) { 4811 case IPPROTO_TCP: { 4812 struct tcphdr th; 4813 u_int32_t seq; 4814 struct pf_state_peer *src, *dst; 4815 u_int8_t dws; 4816 int copyback = 0; 4817 4818 /* 4819 * Only the first 8 bytes of the TCP header can be 4820 * expected. Don't access any TCP header fields after 4821 * th_seq, an ackskew test is not possible. 4822 */ 4823 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, 4824 pd2.af)) { 4825 DPFPRINTF(PF_DEBUG_MISC, 4826 ("pf: ICMP error message too short " 4827 "(tcp)\n")); 4828 return (PF_DROP); 4829 } 4830 4831 key.af = pd2.af; 4832 key.proto = IPPROTO_TCP; 4833 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4834 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4835 key.port[pd2.sidx] = th.th_sport; 4836 key.port[pd2.didx] = th.th_dport; 4837 4838 STATE_LOOKUP(kif, &key, direction, *state, m); 4839 4840 if (direction == (*state)->direction) { 4841 src = &(*state)->dst; 4842 dst = &(*state)->src; 4843 } else { 4844 src = &(*state)->src; 4845 dst = &(*state)->dst; 4846 } 4847 4848 if (src->wscale && dst->wscale) 4849 dws = dst->wscale & PF_WSCALE_MASK; 4850 else 4851 dws = 0; 4852 4853 /* Demodulate sequence number */ 4854 seq = ntohl(th.th_seq) - src->seqdiff; 4855 if (src->seqdiff) { 4856 pf_change_a(&th.th_seq, icmpsum, 4857 htonl(seq), 0); 4858 copyback = 1; 4859 } 4860 4861 if (!((*state)->state_flags & PFSTATE_SLOPPY) && 4862 (!SEQ_GEQ(src->seqhi, seq) || 4863 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { 4864 if (pf_status.debug >= PF_DEBUG_MISC) { 4865 kprintf("pf: BAD ICMP %d:%d ", 4866 icmptype, pd->hdr.icmp->icmp_code); 4867 pf_print_host(pd->src, 0, pd->af); 4868 kprintf(" -> "); 4869 pf_print_host(pd->dst, 0, pd->af); 4870 kprintf(" state: "); 4871 pf_print_state(*state); 4872 kprintf(" seq=%u\n", seq); 4873 } 4874 REASON_SET(reason, PFRES_BADSTATE); 4875 return (PF_DROP); 4876 } else { 4877 if (pf_status.debug >= PF_DEBUG_MISC) { 4878 kprintf("pf: OK ICMP %d:%d ", 4879 icmptype, pd->hdr.icmp->icmp_code); 4880 pf_print_host(pd->src, 0, pd->af); 4881 kprintf(" -> "); 4882 pf_print_host(pd->dst, 0, pd->af); 4883 kprintf(" state: "); 4884 pf_print_state(*state); 4885 kprintf(" seq=%u\n", seq); 4886 } 4887 } 4888 4889 /* translate source/destination address, if necessary */ 4890 if ((*state)->key[PF_SK_WIRE] != 4891 (*state)->key[PF_SK_STACK]) { 4892 struct pf_state_key *nk = 4893 (*state)->key[pd->didx]; 4894 4895 if (PF_ANEQ(pd2.src, 4896 &nk->addr[pd2.sidx], pd2.af) || 4897 nk->port[pd2.sidx] != th.th_sport) 4898 pf_change_icmp(pd2.src, &th.th_sport, 4899 daddr, &nk->addr[pd2.sidx], 4900 nk->port[pd2.sidx], NULL, 4901 pd2.ip_sum, icmpsum, 4902 pd->ip_sum, 0, pd2.af); 4903 4904 if (PF_ANEQ(pd2.dst, 4905 &nk->addr[pd2.didx], pd2.af) || 4906 nk->port[pd2.didx] != th.th_dport) 4907 pf_change_icmp(pd2.dst, &th.th_dport, 4908 NULL, /* XXX Inbound NAT? */ 4909 &nk->addr[pd2.didx], 4910 nk->port[pd2.didx], NULL, 4911 pd2.ip_sum, icmpsum, 4912 pd->ip_sum, 0, pd2.af); 4913 copyback = 1; 4914 } 4915 4916 if (copyback) { 4917 switch (pd2.af) { 4918 #ifdef INET 4919 case AF_INET: 4920 m_copyback(m, off, ICMP_MINLEN, 4921 (caddr_t)pd->hdr.icmp); 4922 m_copyback(m, ipoff2, sizeof(h2), 4923 (caddr_t)&h2); 4924 break; 4925 #endif /* INET */ 4926 #ifdef INET6 4927 case AF_INET6: 4928 m_copyback(m, off, 4929 sizeof(struct icmp6_hdr), 4930 (caddr_t)pd->hdr.icmp6); 4931 m_copyback(m, ipoff2, sizeof(h2_6), 4932 (caddr_t)&h2_6); 4933 break; 4934 #endif /* INET6 */ 4935 } 4936 m_copyback(m, off2, 8, (caddr_t)&th); 4937 } 4938 4939 return (PF_PASS); 4940 break; 4941 } 4942 case IPPROTO_UDP: { 4943 struct udphdr uh; 4944 4945 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 4946 NULL, reason, pd2.af)) { 4947 DPFPRINTF(PF_DEBUG_MISC, 4948 ("pf: ICMP error message too short " 4949 "(udp)\n")); 4950 return (PF_DROP); 4951 } 4952 4953 key.af = pd2.af; 4954 key.proto = IPPROTO_UDP; 4955 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4956 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4957 key.port[pd2.sidx] = uh.uh_sport; 4958 key.port[pd2.didx] = uh.uh_dport; 4959 4960 STATE_LOOKUP(kif, &key, direction, *state, m); 4961 4962 /* translate source/destination address, if necessary */ 4963 if ((*state)->key[PF_SK_WIRE] != 4964 (*state)->key[PF_SK_STACK]) { 4965 struct pf_state_key *nk = 4966 (*state)->key[pd->didx]; 4967 4968 if (PF_ANEQ(pd2.src, 4969 &nk->addr[pd2.sidx], pd2.af) || 4970 nk->port[pd2.sidx] != uh.uh_sport) 4971 pf_change_icmp(pd2.src, &uh.uh_sport, 4972 daddr, &nk->addr[pd2.sidx], 4973 nk->port[pd2.sidx], &uh.uh_sum, 4974 pd2.ip_sum, icmpsum, 4975 pd->ip_sum, 1, pd2.af); 4976 4977 if (PF_ANEQ(pd2.dst, 4978 &nk->addr[pd2.didx], pd2.af) || 4979 nk->port[pd2.didx] != uh.uh_dport) 4980 pf_change_icmp(pd2.dst, &uh.uh_dport, 4981 NULL, /* XXX Inbound NAT? */ 4982 &nk->addr[pd2.didx], 4983 nk->port[pd2.didx], &uh.uh_sum, 4984 pd2.ip_sum, icmpsum, 4985 pd->ip_sum, 1, pd2.af); 4986 4987 switch (pd2.af) { 4988 #ifdef INET 4989 case AF_INET: 4990 m_copyback(m, off, ICMP_MINLEN, 4991 (caddr_t)pd->hdr.icmp); 4992 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4993 break; 4994 #endif /* INET */ 4995 #ifdef INET6 4996 case AF_INET6: 4997 m_copyback(m, off, 4998 sizeof(struct icmp6_hdr), 4999 (caddr_t)pd->hdr.icmp6); 5000 m_copyback(m, ipoff2, sizeof(h2_6), 5001 (caddr_t)&h2_6); 5002 break; 5003 #endif /* INET6 */ 5004 } 5005 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh); 5006 } 5007 5008 return (PF_PASS); 5009 break; 5010 } 5011 #ifdef INET 5012 case IPPROTO_ICMP: { 5013 struct icmp iih; 5014 5015 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 5016 NULL, reason, pd2.af)) { 5017 DPFPRINTF(PF_DEBUG_MISC, 5018 ("pf: ICMP error message too short i" 5019 "(icmp)\n")); 5020 return (PF_DROP); 5021 } 5022 5023 key.af = pd2.af; 5024 key.proto = IPPROTO_ICMP; 5025 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5026 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5027 key.port[0] = key.port[1] = iih.icmp_id; 5028 5029 STATE_LOOKUP(kif, &key, direction, *state, m); 5030 5031 /* translate source/destination address, if necessary */ 5032 if ((*state)->key[PF_SK_WIRE] != 5033 (*state)->key[PF_SK_STACK]) { 5034 struct pf_state_key *nk = 5035 (*state)->key[pd->didx]; 5036 5037 if (PF_ANEQ(pd2.src, 5038 &nk->addr[pd2.sidx], pd2.af) || 5039 nk->port[pd2.sidx] != iih.icmp_id) 5040 pf_change_icmp(pd2.src, &iih.icmp_id, 5041 daddr, &nk->addr[pd2.sidx], 5042 nk->port[pd2.sidx], NULL, 5043 pd2.ip_sum, icmpsum, 5044 pd->ip_sum, 0, AF_INET); 5045 5046 if (PF_ANEQ(pd2.dst, 5047 &nk->addr[pd2.didx], pd2.af) || 5048 nk->port[pd2.didx] != iih.icmp_id) 5049 pf_change_icmp(pd2.dst, &iih.icmp_id, 5050 NULL, /* XXX Inbound NAT? */ 5051 &nk->addr[pd2.didx], 5052 nk->port[pd2.didx], NULL, 5053 pd2.ip_sum, icmpsum, 5054 pd->ip_sum, 0, AF_INET); 5055 5056 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 5057 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5058 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih); 5059 } 5060 return (PF_PASS); 5061 break; 5062 } 5063 #endif /* INET */ 5064 #ifdef INET6 5065 case IPPROTO_ICMPV6: { 5066 struct icmp6_hdr iih; 5067 5068 if (!pf_pull_hdr(m, off2, &iih, 5069 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { 5070 DPFPRINTF(PF_DEBUG_MISC, 5071 ("pf: ICMP error message too short " 5072 "(icmp6)\n")); 5073 return (PF_DROP); 5074 } 5075 5076 key.af = pd2.af; 5077 key.proto = IPPROTO_ICMPV6; 5078 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5079 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5080 key.port[0] = key.port[1] = iih.icmp6_id; 5081 5082 STATE_LOOKUP(kif, &key, direction, *state, m); 5083 5084 /* translate source/destination address, if necessary */ 5085 if ((*state)->key[PF_SK_WIRE] != 5086 (*state)->key[PF_SK_STACK]) { 5087 struct pf_state_key *nk = 5088 (*state)->key[pd->didx]; 5089 5090 if (PF_ANEQ(pd2.src, 5091 &nk->addr[pd2.sidx], pd2.af) || 5092 nk->port[pd2.sidx] != iih.icmp6_id) 5093 pf_change_icmp(pd2.src, &iih.icmp6_id, 5094 daddr, &nk->addr[pd2.sidx], 5095 nk->port[pd2.sidx], NULL, 5096 pd2.ip_sum, icmpsum, 5097 pd->ip_sum, 0, AF_INET6); 5098 5099 if (PF_ANEQ(pd2.dst, 5100 &nk->addr[pd2.didx], pd2.af) || 5101 nk->port[pd2.didx] != iih.icmp6_id) 5102 pf_change_icmp(pd2.dst, &iih.icmp6_id, 5103 NULL, /* XXX Inbound NAT? */ 5104 &nk->addr[pd2.didx], 5105 nk->port[pd2.didx], NULL, 5106 pd2.ip_sum, icmpsum, 5107 pd->ip_sum, 0, AF_INET6); 5108 5109 m_copyback(m, off, sizeof(struct icmp6_hdr), 5110 (caddr_t)pd->hdr.icmp6); 5111 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); 5112 m_copyback(m, off2, sizeof(struct icmp6_hdr), 5113 (caddr_t)&iih); 5114 } 5115 5116 return (PF_PASS); 5117 break; 5118 } 5119 #endif /* INET6 */ 5120 default: { 5121 key.af = pd2.af; 5122 key.proto = pd2.proto; 5123 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5124 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5125 key.port[0] = key.port[1] = 0; 5126 5127 STATE_LOOKUP(kif, &key, direction, *state, m); 5128 5129 /* translate source/destination address, if necessary */ 5130 if ((*state)->key[PF_SK_WIRE] != 5131 (*state)->key[PF_SK_STACK]) { 5132 struct pf_state_key *nk = 5133 (*state)->key[pd->didx]; 5134 5135 if (PF_ANEQ(pd2.src, 5136 &nk->addr[pd2.sidx], pd2.af)) 5137 pf_change_icmp(pd2.src, NULL, daddr, 5138 &nk->addr[pd2.sidx], 0, NULL, 5139 pd2.ip_sum, icmpsum, 5140 pd->ip_sum, 0, pd2.af); 5141 5142 if (PF_ANEQ(pd2.dst, 5143 &nk->addr[pd2.didx], pd2.af)) 5144 pf_change_icmp(pd2.src, NULL, 5145 NULL, /* XXX Inbound NAT? */ 5146 &nk->addr[pd2.didx], 0, NULL, 5147 pd2.ip_sum, icmpsum, 5148 pd->ip_sum, 0, pd2.af); 5149 5150 switch (pd2.af) { 5151 #ifdef INET 5152 case AF_INET: 5153 m_copyback(m, off, ICMP_MINLEN, 5154 (caddr_t)pd->hdr.icmp); 5155 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5156 break; 5157 #endif /* INET */ 5158 #ifdef INET6 5159 case AF_INET6: 5160 m_copyback(m, off, 5161 sizeof(struct icmp6_hdr), 5162 (caddr_t)pd->hdr.icmp6); 5163 m_copyback(m, ipoff2, sizeof(h2_6), 5164 (caddr_t)&h2_6); 5165 break; 5166 #endif /* INET6 */ 5167 } 5168 } 5169 return (PF_PASS); 5170 break; 5171 } 5172 } 5173 } 5174 } 5175 5176 int 5177 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 5178 struct mbuf *m, struct pf_pdesc *pd) 5179 { 5180 struct pf_state_peer *src, *dst; 5181 struct pf_state_key_cmp key; 5182 5183 key.af = pd->af; 5184 key.proto = pd->proto; 5185 if (direction == PF_IN) { 5186 PF_ACPY(&key.addr[0], pd->src, key.af); 5187 PF_ACPY(&key.addr[1], pd->dst, key.af); 5188 key.port[0] = key.port[1] = 0; 5189 } else { 5190 PF_ACPY(&key.addr[1], pd->src, key.af); 5191 PF_ACPY(&key.addr[0], pd->dst, key.af); 5192 key.port[1] = key.port[0] = 0; 5193 } 5194 5195 STATE_LOOKUP(kif, &key, direction, *state, m); 5196 5197 if (direction == (*state)->direction) { 5198 src = &(*state)->src; 5199 dst = &(*state)->dst; 5200 } else { 5201 src = &(*state)->dst; 5202 dst = &(*state)->src; 5203 } 5204 5205 /* update states */ 5206 if (src->state < PFOTHERS_SINGLE) 5207 src->state = PFOTHERS_SINGLE; 5208 if (dst->state == PFOTHERS_SINGLE) 5209 dst->state = PFOTHERS_MULTIPLE; 5210 5211 /* update expire time */ 5212 (*state)->expire = time_second; 5213 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) 5214 (*state)->timeout = PFTM_OTHER_MULTIPLE; 5215 else 5216 (*state)->timeout = PFTM_OTHER_SINGLE; 5217 5218 /* translate source/destination address, if necessary */ 5219 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 5220 struct pf_state_key *nk = (*state)->key[pd->didx]; 5221 5222 KKASSERT(nk); 5223 KKASSERT(pd); 5224 KKASSERT(pd->src); 5225 KKASSERT(pd->dst); 5226 switch (pd->af) { 5227 #ifdef INET 5228 case AF_INET: 5229 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) 5230 pf_change_a(&pd->src->v4.s_addr, 5231 pd->ip_sum, 5232 nk->addr[pd->sidx].v4.s_addr, 5233 0); 5234 5235 5236 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) 5237 pf_change_a(&pd->dst->v4.s_addr, 5238 pd->ip_sum, 5239 nk->addr[pd->didx].v4.s_addr, 5240 0); 5241 5242 break; 5243 #endif /* INET */ 5244 #ifdef INET6 5245 case AF_INET6: 5246 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) 5247 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); 5248 5249 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) 5250 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); 5251 #endif /* INET6 */ 5252 } 5253 } 5254 return (PF_PASS); 5255 } 5256 5257 /* 5258 * ipoff and off are measured from the start of the mbuf chain. 5259 * h must be at "ipoff" on the mbuf chain. 5260 */ 5261 void * 5262 pf_pull_hdr(struct mbuf *m, int off, void *p, int len, 5263 u_short *actionp, u_short *reasonp, sa_family_t af) 5264 { 5265 switch (af) { 5266 #ifdef INET 5267 case AF_INET: { 5268 struct ip *h = mtod(m, struct ip *); 5269 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3; 5270 5271 if (fragoff) { 5272 if (fragoff >= len) 5273 ACTION_SET(actionp, PF_PASS); 5274 else { 5275 ACTION_SET(actionp, PF_DROP); 5276 REASON_SET(reasonp, PFRES_FRAG); 5277 } 5278 return (NULL); 5279 } 5280 if (m->m_pkthdr.len < off + len || 5281 h->ip_len < off + len) { 5282 ACTION_SET(actionp, PF_DROP); 5283 REASON_SET(reasonp, PFRES_SHORT); 5284 return (NULL); 5285 } 5286 break; 5287 } 5288 #endif /* INET */ 5289 #ifdef INET6 5290 case AF_INET6: { 5291 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 5292 5293 if (m->m_pkthdr.len < off + len || 5294 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < 5295 (unsigned)(off + len)) { 5296 ACTION_SET(actionp, PF_DROP); 5297 REASON_SET(reasonp, PFRES_SHORT); 5298 return (NULL); 5299 } 5300 break; 5301 } 5302 #endif /* INET6 */ 5303 } 5304 m_copydata(m, off, len, p); 5305 return (p); 5306 } 5307 5308 int 5309 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif) 5310 { 5311 struct sockaddr_in *dst; 5312 int ret = 1; 5313 int check_mpath; 5314 #ifdef INET6 5315 struct sockaddr_in6 *dst6; 5316 struct route_in6 ro; 5317 #else 5318 struct route ro; 5319 #endif 5320 struct radix_node *rn; 5321 struct rtentry *rt; 5322 struct ifnet *ifp; 5323 5324 check_mpath = 0; 5325 bzero(&ro, sizeof(ro)); 5326 switch (af) { 5327 case AF_INET: 5328 dst = satosin(&ro.ro_dst); 5329 dst->sin_family = AF_INET; 5330 dst->sin_len = sizeof(*dst); 5331 dst->sin_addr = addr->v4; 5332 break; 5333 #ifdef INET6 5334 case AF_INET6: 5335 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5336 dst6->sin6_family = AF_INET6; 5337 dst6->sin6_len = sizeof(*dst6); 5338 dst6->sin6_addr = addr->v6; 5339 break; 5340 #endif /* INET6 */ 5341 default: 5342 return (0); 5343 } 5344 5345 /* Skip checks for ipsec interfaces */ 5346 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) 5347 goto out; 5348 5349 rtalloc_ign((struct route *)&ro, 0); 5350 5351 if (ro.ro_rt != NULL) { 5352 /* No interface given, this is a no-route check */ 5353 if (kif == NULL) 5354 goto out; 5355 5356 if (kif->pfik_ifp == NULL) { 5357 ret = 0; 5358 goto out; 5359 } 5360 5361 /* Perform uRPF check if passed input interface */ 5362 ret = 0; 5363 rn = (struct radix_node *)ro.ro_rt; 5364 do { 5365 rt = (struct rtentry *)rn; 5366 ifp = rt->rt_ifp; 5367 5368 if (kif->pfik_ifp == ifp) 5369 ret = 1; 5370 rn = NULL; 5371 } while (check_mpath == 1 && rn != NULL && ret == 0); 5372 } else 5373 ret = 0; 5374 out: 5375 if (ro.ro_rt != NULL) 5376 RTFREE(ro.ro_rt); 5377 return (ret); 5378 } 5379 5380 int 5381 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw) 5382 { 5383 struct sockaddr_in *dst; 5384 #ifdef INET6 5385 struct sockaddr_in6 *dst6; 5386 struct route_in6 ro; 5387 #else 5388 struct route ro; 5389 #endif 5390 int ret = 0; 5391 5392 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5393 5394 bzero(&ro, sizeof(ro)); 5395 switch (af) { 5396 case AF_INET: 5397 dst = satosin(&ro.ro_dst); 5398 dst->sin_family = AF_INET; 5399 dst->sin_len = sizeof(*dst); 5400 dst->sin_addr = addr->v4; 5401 break; 5402 #ifdef INET6 5403 case AF_INET6: 5404 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5405 dst6->sin6_family = AF_INET6; 5406 dst6->sin6_len = sizeof(*dst6); 5407 dst6->sin6_addr = addr->v6; 5408 break; 5409 #endif /* INET6 */ 5410 default: 5411 return (0); 5412 } 5413 5414 rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING)); 5415 5416 if (ro.ro_rt != NULL) { 5417 RTFREE(ro.ro_rt); 5418 } 5419 5420 return (ret); 5421 } 5422 5423 #ifdef INET 5424 void 5425 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5426 struct pf_state *s, struct pf_pdesc *pd) 5427 { 5428 struct mbuf *m0, *m1; 5429 struct route iproute; 5430 struct route *ro = NULL; 5431 struct sockaddr_in *dst; 5432 struct ip *ip; 5433 struct ifnet *ifp = NULL; 5434 struct pf_addr naddr; 5435 struct pf_src_node *sn = NULL; 5436 int error = 0; 5437 int sw_csum; 5438 #ifdef IPSEC 5439 struct m_tag *mtag; 5440 #endif /* IPSEC */ 5441 5442 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5443 5444 if (m == NULL || *m == NULL || r == NULL || 5445 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5446 panic("pf_route: invalid parameters"); 5447 5448 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5449 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5450 (*m)->m_pkthdr.pf.routed = 1; 5451 } else { 5452 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5453 m0 = *m; 5454 *m = NULL; 5455 goto bad; 5456 } 5457 } 5458 5459 if (r->rt == PF_DUPTO) { 5460 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) { 5461 return; 5462 } 5463 } else { 5464 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5465 return; 5466 } 5467 m0 = *m; 5468 } 5469 5470 if (m0->m_len < sizeof(struct ip)) { 5471 DPFPRINTF(PF_DEBUG_URGENT, 5472 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5473 goto bad; 5474 } 5475 5476 ip = mtod(m0, struct ip *); 5477 5478 ro = &iproute; 5479 bzero((caddr_t)ro, sizeof(*ro)); 5480 dst = satosin(&ro->ro_dst); 5481 dst->sin_family = AF_INET; 5482 dst->sin_len = sizeof(*dst); 5483 dst->sin_addr = ip->ip_dst; 5484 5485 if (r->rt == PF_FASTROUTE) { 5486 rtalloc(ro); 5487 if (ro->ro_rt == 0) { 5488 ipstat.ips_noroute++; 5489 goto bad; 5490 } 5491 5492 ifp = ro->ro_rt->rt_ifp; 5493 ro->ro_rt->rt_use++; 5494 5495 if (ro->ro_rt->rt_flags & RTF_GATEWAY) 5496 dst = satosin(ro->ro_rt->rt_gateway); 5497 } else { 5498 if (TAILQ_EMPTY(&r->rpool.list)) { 5499 DPFPRINTF(PF_DEBUG_URGENT, 5500 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n")); 5501 goto bad; 5502 } 5503 if (s == NULL) { 5504 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, 5505 &naddr, NULL, &sn); 5506 if (!PF_AZERO(&naddr, AF_INET)) 5507 dst->sin_addr.s_addr = naddr.v4.s_addr; 5508 ifp = r->rpool.cur->kif ? 5509 r->rpool.cur->kif->pfik_ifp : NULL; 5510 } else { 5511 if (!PF_AZERO(&s->rt_addr, AF_INET)) 5512 dst->sin_addr.s_addr = 5513 s->rt_addr.v4.s_addr; 5514 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 5515 } 5516 } 5517 if (ifp == NULL) 5518 goto bad; 5519 5520 if (oifp != ifp) { 5521 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5522 goto bad; 5523 } else if (m0 == NULL) { 5524 goto done; 5525 } 5526 if (m0->m_len < sizeof(struct ip)) { 5527 DPFPRINTF(PF_DEBUG_URGENT, 5528 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5529 goto bad; 5530 } 5531 ip = mtod(m0, struct ip *); 5532 } 5533 5534 /* Copied from FreeBSD 5.1-CURRENT ip_output. */ 5535 m0->m_pkthdr.csum_flags |= CSUM_IP; 5536 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist; 5537 if (sw_csum & CSUM_DELAY_DATA) { 5538 in_delayed_cksum(m0); 5539 sw_csum &= ~CSUM_DELAY_DATA; 5540 } 5541 m0->m_pkthdr.csum_flags &= ifp->if_hwassist; 5542 5543 if (ip->ip_len <= ifp->if_mtu || 5544 (ifp->if_hwassist & CSUM_FRAGMENT && 5545 (ip->ip_off & IP_DF) == 0)) { 5546 ip->ip_len = htons(ip->ip_len); 5547 ip->ip_off = htons(ip->ip_off); 5548 ip->ip_sum = 0; 5549 if (sw_csum & CSUM_DELAY_IP) { 5550 /* From KAME */ 5551 if (ip->ip_v == IPVERSION && 5552 (ip->ip_hl << 2) == sizeof(*ip)) { 5553 ip->ip_sum = in_cksum_hdr(ip); 5554 } else { 5555 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); 5556 } 5557 } 5558 lwkt_reltoken(&pf_token); 5559 error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt); 5560 lwkt_gettoken(&pf_token); 5561 goto done; 5562 } 5563 5564 /* 5565 * Too large for interface; fragment if possible. 5566 * Must be able to put at least 8 bytes per fragment. 5567 */ 5568 if (ip->ip_off & IP_DF) { 5569 ipstat.ips_cantfrag++; 5570 if (r->rt != PF_DUPTO) { 5571 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, 5572 ifp->if_mtu); 5573 goto done; 5574 } else 5575 goto bad; 5576 } 5577 5578 m1 = m0; 5579 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum); 5580 if (error) { 5581 goto bad; 5582 } 5583 5584 for (m0 = m1; m0; m0 = m1) { 5585 m1 = m0->m_nextpkt; 5586 m0->m_nextpkt = 0; 5587 if (error == 0) { 5588 lwkt_reltoken(&pf_token); 5589 error = (*ifp->if_output)(ifp, m0, sintosa(dst), 5590 NULL); 5591 lwkt_gettoken(&pf_token); 5592 } else 5593 m_freem(m0); 5594 } 5595 5596 if (error == 0) 5597 ipstat.ips_fragmented++; 5598 5599 done: 5600 if (r->rt != PF_DUPTO) 5601 *m = NULL; 5602 if (ro == &iproute && ro->ro_rt) 5603 RTFREE(ro->ro_rt); 5604 return; 5605 5606 bad: 5607 m_freem(m0); 5608 goto done; 5609 } 5610 #endif /* INET */ 5611 5612 #ifdef INET6 5613 void 5614 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5615 struct pf_state *s, struct pf_pdesc *pd) 5616 { 5617 struct mbuf *m0; 5618 struct route_in6 ip6route; 5619 struct route_in6 *ro; 5620 struct sockaddr_in6 *dst; 5621 struct ip6_hdr *ip6; 5622 struct ifnet *ifp = NULL; 5623 struct pf_addr naddr; 5624 struct pf_src_node *sn = NULL; 5625 int error = 0; 5626 5627 if (m == NULL || *m == NULL || r == NULL || 5628 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5629 panic("pf_route6: invalid parameters"); 5630 5631 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5632 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5633 (*m)->m_pkthdr.pf.routed = 1; 5634 } else { 5635 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5636 m0 = *m; 5637 *m = NULL; 5638 goto bad; 5639 } 5640 } 5641 5642 if (r->rt == PF_DUPTO) { 5643 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) 5644 return; 5645 } else { 5646 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) 5647 return; 5648 m0 = *m; 5649 } 5650 5651 if (m0->m_len < sizeof(struct ip6_hdr)) { 5652 DPFPRINTF(PF_DEBUG_URGENT, 5653 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n")); 5654 goto bad; 5655 } 5656 ip6 = mtod(m0, struct ip6_hdr *); 5657 5658 ro = &ip6route; 5659 bzero((caddr_t)ro, sizeof(*ro)); 5660 dst = (struct sockaddr_in6 *)&ro->ro_dst; 5661 dst->sin6_family = AF_INET6; 5662 dst->sin6_len = sizeof(*dst); 5663 dst->sin6_addr = ip6->ip6_dst; 5664 5665 /* 5666 * DragonFly doesn't zero the auxillary pkghdr fields, only fw_flags, 5667 * so make sure pf.flags is clear. 5668 * 5669 * Cheat. XXX why only in the v6 case??? 5670 */ 5671 if (r->rt == PF_FASTROUTE) { 5672 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 5673 m0->m_pkthdr.pf.flags = 0; 5674 /* XXX Re-Check when Upgrading to > 4.4 */ 5675 m0->m_pkthdr.pf.statekey = NULL; 5676 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 5677 return; 5678 } 5679 5680 if (TAILQ_EMPTY(&r->rpool.list)) { 5681 DPFPRINTF(PF_DEBUG_URGENT, 5682 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n")); 5683 goto bad; 5684 } 5685 if (s == NULL) { 5686 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src, 5687 &naddr, NULL, &sn); 5688 if (!PF_AZERO(&naddr, AF_INET6)) 5689 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 5690 &naddr, AF_INET6); 5691 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; 5692 } else { 5693 if (!PF_AZERO(&s->rt_addr, AF_INET6)) 5694 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 5695 &s->rt_addr, AF_INET6); 5696 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 5697 } 5698 if (ifp == NULL) 5699 goto bad; 5700 5701 if (oifp != ifp) { 5702 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5703 goto bad; 5704 } else if (m0 == NULL) { 5705 goto done; 5706 } 5707 if (m0->m_len < sizeof(struct ip6_hdr)) { 5708 DPFPRINTF(PF_DEBUG_URGENT, 5709 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n")); 5710 goto bad; 5711 } 5712 ip6 = mtod(m0, struct ip6_hdr *); 5713 } 5714 5715 /* 5716 * If the packet is too large for the outgoing interface, 5717 * send back an icmp6 error. 5718 */ 5719 if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr)) 5720 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index); 5721 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) { 5722 error = nd6_output(ifp, ifp, m0, dst, NULL); 5723 } else { 5724 in6_ifstat_inc(ifp, ifs6_in_toobig); 5725 if (r->rt != PF_DUPTO) 5726 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); 5727 else 5728 goto bad; 5729 } 5730 5731 done: 5732 if (r->rt != PF_DUPTO) 5733 *m = NULL; 5734 return; 5735 5736 bad: 5737 m_freem(m0); 5738 goto done; 5739 } 5740 #endif /* INET6 */ 5741 5742 5743 /* 5744 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag 5745 * off is the offset where the protocol header starts 5746 * len is the total length of protocol header plus payload 5747 * returns 0 when the checksum is valid, otherwise returns 1. 5748 */ 5749 /* 5750 * XXX 5751 * FreeBSD supports cksum offload for the following drivers. 5752 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4) 5753 * If we can make full use of it we would outperform ipfw/ipfilter in 5754 * very heavy traffic. 5755 * I have not tested 'cause I don't have NICs that supports cksum offload. 5756 * (There might be problems. Typical phenomena would be 5757 * 1. No route message for UDP packet. 5758 * 2. No connection acceptance from external hosts regardless of rule set.) 5759 */ 5760 int 5761 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, 5762 sa_family_t af) 5763 { 5764 u_int16_t sum = 0; 5765 int hw_assist = 0; 5766 struct ip *ip; 5767 5768 if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) 5769 return (1); 5770 if (m->m_pkthdr.len < off + len) 5771 return (1); 5772 5773 switch (p) { 5774 case IPPROTO_TCP: 5775 case IPPROTO_UDP: 5776 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 5777 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 5778 sum = m->m_pkthdr.csum_data; 5779 } else { 5780 ip = mtod(m, struct ip *); 5781 sum = in_pseudo(ip->ip_src.s_addr, 5782 ip->ip_dst.s_addr, htonl((u_short)len + 5783 m->m_pkthdr.csum_data + p)); 5784 } 5785 sum ^= 0xffff; 5786 ++hw_assist; 5787 } 5788 break; 5789 case IPPROTO_ICMP: 5790 #ifdef INET6 5791 case IPPROTO_ICMPV6: 5792 #endif /* INET6 */ 5793 break; 5794 default: 5795 return (1); 5796 } 5797 5798 if (!hw_assist) { 5799 switch (af) { 5800 case AF_INET: 5801 if (p == IPPROTO_ICMP) { 5802 if (m->m_len < off) 5803 return (1); 5804 m->m_data += off; 5805 m->m_len -= off; 5806 sum = in_cksum(m, len); 5807 m->m_data -= off; 5808 m->m_len += off; 5809 } else { 5810 if (m->m_len < sizeof(struct ip)) 5811 return (1); 5812 sum = in_cksum_range(m, p, off, len); 5813 if (sum == 0) { 5814 m->m_pkthdr.csum_flags |= 5815 (CSUM_DATA_VALID | 5816 CSUM_PSEUDO_HDR); 5817 m->m_pkthdr.csum_data = 0xffff; 5818 } 5819 } 5820 break; 5821 #ifdef INET6 5822 case AF_INET6: 5823 if (m->m_len < sizeof(struct ip6_hdr)) 5824 return (1); 5825 sum = in6_cksum(m, p, off, len); 5826 /* 5827 * XXX 5828 * IPv6 H/W cksum off-load not supported yet! 5829 * 5830 * if (sum == 0) { 5831 * m->m_pkthdr.csum_flags |= 5832 * (CSUM_DATA_VALID|CSUM_PSEUDO_HDR); 5833 * m->m_pkthdr.csum_data = 0xffff; 5834 *} 5835 */ 5836 break; 5837 #endif /* INET6 */ 5838 default: 5839 return (1); 5840 } 5841 } 5842 if (sum) { 5843 switch (p) { 5844 case IPPROTO_TCP: 5845 tcpstat.tcps_rcvbadsum++; 5846 break; 5847 case IPPROTO_UDP: 5848 udpstat.udps_badsum++; 5849 break; 5850 case IPPROTO_ICMP: 5851 icmpstat.icps_checksum++; 5852 break; 5853 #ifdef INET6 5854 case IPPROTO_ICMPV6: 5855 icmp6stat.icp6s_checksum++; 5856 break; 5857 #endif /* INET6 */ 5858 } 5859 return (1); 5860 } 5861 return (0); 5862 } 5863 5864 struct pf_divert * 5865 pf_find_divert(struct mbuf *m) 5866 { 5867 struct m_tag *mtag; 5868 5869 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) 5870 return (NULL); 5871 5872 return ((struct pf_divert *)(mtag + 1)); 5873 } 5874 5875 struct pf_divert * 5876 pf_get_divert(struct mbuf *m) 5877 { 5878 struct m_tag *mtag; 5879 5880 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) { 5881 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert), 5882 M_NOWAIT); 5883 if (mtag == NULL) 5884 return (NULL); 5885 bzero(mtag + 1, sizeof(struct pf_divert)); 5886 m_tag_prepend(m, mtag); 5887 } 5888 5889 return ((struct pf_divert *)(mtag + 1)); 5890 } 5891 5892 #ifdef INET 5893 int 5894 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, 5895 struct ether_header *eh, struct inpcb *inp) 5896 { 5897 struct pfi_kif *kif; 5898 u_short action, reason = 0, log = 0; 5899 struct mbuf *m = *m0; 5900 struct ip *h = NULL; 5901 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; 5902 struct pf_state *s = NULL; 5903 struct pf_ruleset *ruleset = NULL; 5904 struct pf_pdesc pd; 5905 int off, dirndx, pqid = 0; 5906 5907 if (!pf_status.running) 5908 return (PF_PASS); 5909 5910 memset(&pd, 0, sizeof(pd)); 5911 if (ifp->if_type == IFT_CARP && ifp->if_carpdev) 5912 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif; 5913 else 5914 kif = (struct pfi_kif *)ifp->if_pf_kif; 5915 5916 if (kif == NULL) { 5917 DPFPRINTF(PF_DEBUG_URGENT, 5918 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); 5919 return (PF_DROP); 5920 } 5921 if (kif->pfik_flags & PFI_IFLAG_SKIP) 5922 return (PF_PASS); 5923 5924 #ifdef DIAGNOSTIC 5925 if ((m->m_flags & M_PKTHDR) == 0) 5926 panic("non-M_PKTHDR is passed to pf_test"); 5927 #endif /* DIAGNOSTIC */ 5928 5929 if (m->m_pkthdr.len < (int)sizeof(*h)) { 5930 action = PF_DROP; 5931 REASON_SET(&reason, PFRES_SHORT); 5932 log = 1; 5933 goto done; 5934 } 5935 5936 /* 5937 * DragonFly doesn't zero the auxillary pkghdr fields, only fw_flags, 5938 * so make sure pf.flags is clear. 5939 */ 5940 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED) 5941 return (PF_PASS); 5942 m->m_pkthdr.pf.flags = 0; 5943 /* Re-Check when updating to > 4.4 */ 5944 m->m_pkthdr.pf.statekey = NULL; 5945 5946 /* We do IP header normalization and packet reassembly here */ 5947 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) { 5948 action = PF_DROP; 5949 goto done; 5950 } 5951 m = *m0; /* pf_normalize messes with m0 */ 5952 h = mtod(m, struct ip *); 5953 5954 off = h->ip_hl << 2; 5955 if (off < (int)sizeof(*h)) { 5956 action = PF_DROP; 5957 REASON_SET(&reason, PFRES_SHORT); 5958 log = 1; 5959 goto done; 5960 } 5961 5962 pd.src = (struct pf_addr *)&h->ip_src; 5963 pd.dst = (struct pf_addr *)&h->ip_dst; 5964 pd.sport = pd.dport = NULL; 5965 pd.ip_sum = &h->ip_sum; 5966 pd.proto_sum = NULL; 5967 pd.proto = h->ip_p; 5968 pd.dir = dir; 5969 pd.sidx = (dir == PF_IN) ? 0 : 1; 5970 pd.didx = (dir == PF_IN) ? 1 : 0; 5971 pd.af = AF_INET; 5972 pd.tos = h->ip_tos; 5973 pd.tot_len = h->ip_len; 5974 pd.eh = eh; 5975 5976 /* handle fragments that didn't get reassembled by normalization */ 5977 if (h->ip_off & (IP_MF | IP_OFFMASK)) { 5978 action = pf_test_fragment(&r, dir, kif, m, h, 5979 &pd, &a, &ruleset); 5980 goto done; 5981 } 5982 5983 switch (h->ip_p) { 5984 5985 case IPPROTO_TCP: { 5986 struct tcphdr th; 5987 5988 pd.hdr.tcp = &th; 5989 if (!pf_pull_hdr(m, off, &th, sizeof(th), 5990 &action, &reason, AF_INET)) { 5991 log = action != PF_PASS; 5992 goto done; 5993 } 5994 pd.p_len = pd.tot_len - off - (th.th_off << 2); 5995 if ((th.th_flags & TH_ACK) && pd.p_len == 0) 5996 pqid = 1; 5997 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 5998 if (action == PF_DROP) 5999 goto done; 6000 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 6001 &reason); 6002 if (action == PF_PASS) { 6003 pfsync_update_state(s); 6004 r = s->rule.ptr; 6005 a = s->anchor.ptr; 6006 log = s->log; 6007 } else if (s == NULL) 6008 action = pf_test_rule(&r, &s, dir, kif, 6009 m, off, h, &pd, &a, &ruleset, NULL, inp); 6010 break; 6011 } 6012 6013 case IPPROTO_UDP: { 6014 struct udphdr uh; 6015 6016 pd.hdr.udp = &uh; 6017 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 6018 &action, &reason, AF_INET)) { 6019 log = action != PF_PASS; 6020 goto done; 6021 } 6022 if (uh.uh_dport == 0 || 6023 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 6024 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 6025 action = PF_DROP; 6026 REASON_SET(&reason, PFRES_SHORT); 6027 goto done; 6028 } 6029 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 6030 if (action == PF_PASS) { 6031 pfsync_update_state(s); 6032 r = s->rule.ptr; 6033 a = s->anchor.ptr; 6034 log = s->log; 6035 } else if (s == NULL) 6036 action = pf_test_rule(&r, &s, dir, kif, 6037 m, off, h, &pd, &a, &ruleset, NULL, inp); 6038 break; 6039 } 6040 6041 case IPPROTO_ICMP: { 6042 struct icmp ih; 6043 6044 pd.hdr.icmp = &ih; 6045 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN, 6046 &action, &reason, AF_INET)) { 6047 log = action != PF_PASS; 6048 goto done; 6049 } 6050 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, 6051 &reason); 6052 if (action == PF_PASS) { 6053 pfsync_update_state(s); 6054 r = s->rule.ptr; 6055 a = s->anchor.ptr; 6056 log = s->log; 6057 } else if (s == NULL) 6058 action = pf_test_rule(&r, &s, dir, kif, 6059 m, off, h, &pd, &a, &ruleset, NULL, inp); 6060 break; 6061 } 6062 6063 default: 6064 action = pf_test_state_other(&s, dir, kif, m, &pd); 6065 if (action == PF_PASS) { 6066 pfsync_update_state(s); 6067 r = s->rule.ptr; 6068 a = s->anchor.ptr; 6069 log = s->log; 6070 } else if (s == NULL) 6071 action = pf_test_rule(&r, &s, dir, kif, m, off, h, 6072 &pd, &a, &ruleset, NULL, inp); 6073 break; 6074 } 6075 6076 done: 6077 if (action == PF_PASS && h->ip_hl > 5 && 6078 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 6079 action = PF_DROP; 6080 REASON_SET(&reason, PFRES_IPOPTIONS); 6081 log = 1; 6082 DPFPRINTF(PF_DEBUG_MISC, 6083 ("pf: dropping packet with ip options\n")); 6084 } 6085 6086 if ((s && s->tag) || r->rtableid) 6087 pf_tag_packet(m, s ? s->tag : 0, r->rtableid); 6088 6089 #if 0 6090 if (dir == PF_IN && s && s->key[PF_SK_STACK]) 6091 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK]; 6092 #endif 6093 6094 #ifdef ALTQ 6095 if (action == PF_PASS && r->qid) { 6096 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 6097 if (pqid || (pd.tos & IPTOS_LOWDELAY)) 6098 m->m_pkthdr.pf.qid = r->pqid; 6099 else 6100 m->m_pkthdr.pf.qid = r->qid; 6101 m->m_pkthdr.pf.ecn_af = AF_INET; 6102 m->m_pkthdr.pf.hdr = h; 6103 /* add connection hash for fairq */ 6104 if (s) { 6105 /* for fairq */ 6106 m->m_pkthdr.pf.state_hash = s->hash; 6107 m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED; 6108 } 6109 } 6110 #endif /* ALTQ */ 6111 6112 /* 6113 * connections redirected to loopback should not match sockets 6114 * bound specifically to loopback due to security implications, 6115 * see tcp_input() and in_pcblookup_listen(). 6116 */ 6117 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 6118 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 6119 (s->nat_rule.ptr->action == PF_RDR || 6120 s->nat_rule.ptr->action == PF_BINAT) && 6121 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) 6122 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST; 6123 6124 if (dir == PF_IN && action == PF_PASS && r->divert.port) { 6125 struct pf_divert *divert; 6126 6127 if ((divert = pf_get_divert(m))) { 6128 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED; 6129 divert->port = r->divert.port; 6130 divert->addr.ipv4 = r->divert.addr.v4; 6131 } 6132 } 6133 6134 if (log) { 6135 struct pf_rule *lr; 6136 6137 if (s != NULL && s->nat_rule.ptr != NULL && 6138 s->nat_rule.ptr->log & PF_LOG_ALL) 6139 lr = s->nat_rule.ptr; 6140 else 6141 lr = r; 6142 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset, 6143 &pd); 6144 } 6145 6146 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 6147 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++; 6148 6149 if (action == PF_PASS || r->action == PF_DROP) { 6150 dirndx = (dir == PF_OUT); 6151 r->packets[dirndx]++; 6152 r->bytes[dirndx] += pd.tot_len; 6153 if (a != NULL) { 6154 a->packets[dirndx]++; 6155 a->bytes[dirndx] += pd.tot_len; 6156 } 6157 if (s != NULL) { 6158 if (s->nat_rule.ptr != NULL) { 6159 s->nat_rule.ptr->packets[dirndx]++; 6160 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 6161 } 6162 if (s->src_node != NULL) { 6163 s->src_node->packets[dirndx]++; 6164 s->src_node->bytes[dirndx] += pd.tot_len; 6165 } 6166 if (s->nat_src_node != NULL) { 6167 s->nat_src_node->packets[dirndx]++; 6168 s->nat_src_node->bytes[dirndx] += pd.tot_len; 6169 } 6170 dirndx = (dir == s->direction) ? 0 : 1; 6171 s->packets[dirndx]++; 6172 s->bytes[dirndx] += pd.tot_len; 6173 } 6174 tr = r; 6175 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 6176 if (nr != NULL && r == &pf_default_rule) 6177 tr = nr; 6178 if (tr->src.addr.type == PF_ADDR_TABLE) 6179 pfr_update_stats(tr->src.addr.p.tbl, 6180 (s == NULL) ? pd.src : 6181 &s->key[(s->direction == PF_IN)]-> 6182 addr[(s->direction == PF_OUT)], 6183 pd.af, pd.tot_len, dir == PF_OUT, 6184 r->action == PF_PASS, tr->src.neg); 6185 if (tr->dst.addr.type == PF_ADDR_TABLE) 6186 pfr_update_stats(tr->dst.addr.p.tbl, 6187 (s == NULL) ? pd.dst : 6188 &s->key[(s->direction == PF_IN)]-> 6189 addr[(s->direction == PF_IN)], 6190 pd.af, pd.tot_len, dir == PF_OUT, 6191 r->action == PF_PASS, tr->dst.neg); 6192 } 6193 6194 6195 if (action == PF_SYNPROXY_DROP) { 6196 m_freem(*m0); 6197 *m0 = NULL; 6198 action = PF_PASS; 6199 } else if (r->rt) 6200 /* pf_route can free the mbuf causing *m0 to become NULL */ 6201 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd); 6202 6203 return (action); 6204 } 6205 #endif /* INET */ 6206 6207 #ifdef INET6 6208 int 6209 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, 6210 struct ether_header *eh, struct inpcb *inp) 6211 { 6212 struct pfi_kif *kif; 6213 u_short action, reason = 0, log = 0; 6214 struct mbuf *m = *m0, *n = NULL; 6215 struct ip6_hdr *h = NULL; 6216 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; 6217 struct pf_state *s = NULL; 6218 struct pf_ruleset *ruleset = NULL; 6219 struct pf_pdesc pd; 6220 int off, terminal = 0, dirndx, rh_cnt = 0; 6221 6222 if (!pf_status.running) 6223 return (PF_PASS); 6224 6225 memset(&pd, 0, sizeof(pd)); 6226 if (ifp->if_type == IFT_CARP && ifp->if_carpdev) 6227 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif; 6228 else 6229 kif = (struct pfi_kif *)ifp->if_pf_kif; 6230 6231 if (kif == NULL) { 6232 DPFPRINTF(PF_DEBUG_URGENT, 6233 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname)); 6234 return (PF_DROP); 6235 } 6236 if (kif->pfik_flags & PFI_IFLAG_SKIP) 6237 return (PF_PASS); 6238 6239 #ifdef DIAGNOSTIC 6240 if ((m->m_flags & M_PKTHDR) == 0) 6241 panic("non-M_PKTHDR is passed to pf_test6"); 6242 #endif /* DIAGNOSTIC */ 6243 6244 if (m->m_pkthdr.len < (int)sizeof(*h)) { 6245 action = PF_DROP; 6246 REASON_SET(&reason, PFRES_SHORT); 6247 log = 1; 6248 goto done; 6249 } 6250 6251 /* 6252 * DragonFly doesn't zero the auxillary pkghdr fields, only fw_flags, 6253 * so make sure pf.flags is clear. 6254 */ 6255 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED) 6256 return (PF_PASS); 6257 m->m_pkthdr.pf.flags = 0; 6258 /* Re-Check when updating to > 4.4 */ 6259 m->m_pkthdr.pf.statekey = NULL; 6260 6261 /* We do IP header normalization and packet reassembly here */ 6262 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) { 6263 action = PF_DROP; 6264 goto done; 6265 } 6266 m = *m0; /* pf_normalize messes with m0 */ 6267 h = mtod(m, struct ip6_hdr *); 6268 6269 #if 1 6270 /* 6271 * we do not support jumbogram yet. if we keep going, zero ip6_plen 6272 * will do something bad, so drop the packet for now. 6273 */ 6274 if (htons(h->ip6_plen) == 0) { 6275 action = PF_DROP; 6276 REASON_SET(&reason, PFRES_NORM); /*XXX*/ 6277 goto done; 6278 } 6279 #endif 6280 6281 pd.src = (struct pf_addr *)&h->ip6_src; 6282 pd.dst = (struct pf_addr *)&h->ip6_dst; 6283 pd.sport = pd.dport = NULL; 6284 pd.ip_sum = NULL; 6285 pd.proto_sum = NULL; 6286 pd.dir = dir; 6287 pd.sidx = (dir == PF_IN) ? 0 : 1; 6288 pd.didx = (dir == PF_IN) ? 1 : 0; 6289 pd.af = AF_INET6; 6290 pd.tos = 0; 6291 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); 6292 pd.eh = eh; 6293 6294 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr); 6295 pd.proto = h->ip6_nxt; 6296 do { 6297 switch (pd.proto) { 6298 case IPPROTO_FRAGMENT: 6299 action = pf_test_fragment(&r, dir, kif, m, h, 6300 &pd, &a, &ruleset); 6301 if (action == PF_DROP) 6302 REASON_SET(&reason, PFRES_FRAG); 6303 goto done; 6304 case IPPROTO_ROUTING: { 6305 struct ip6_rthdr rthdr; 6306 6307 if (rh_cnt++) { 6308 DPFPRINTF(PF_DEBUG_MISC, 6309 ("pf: IPv6 more than one rthdr\n")); 6310 action = PF_DROP; 6311 REASON_SET(&reason, PFRES_IPOPTIONS); 6312 log = 1; 6313 goto done; 6314 } 6315 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL, 6316 &reason, pd.af)) { 6317 DPFPRINTF(PF_DEBUG_MISC, 6318 ("pf: IPv6 short rthdr\n")); 6319 action = PF_DROP; 6320 REASON_SET(&reason, PFRES_SHORT); 6321 log = 1; 6322 goto done; 6323 } 6324 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) { 6325 DPFPRINTF(PF_DEBUG_MISC, 6326 ("pf: IPv6 rthdr0\n")); 6327 action = PF_DROP; 6328 REASON_SET(&reason, PFRES_IPOPTIONS); 6329 log = 1; 6330 goto done; 6331 } 6332 /* FALLTHROUGH */ 6333 } 6334 case IPPROTO_AH: 6335 case IPPROTO_HOPOPTS: 6336 case IPPROTO_DSTOPTS: { 6337 /* get next header and header length */ 6338 struct ip6_ext opt6; 6339 6340 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6), 6341 NULL, &reason, pd.af)) { 6342 DPFPRINTF(PF_DEBUG_MISC, 6343 ("pf: IPv6 short opt\n")); 6344 action = PF_DROP; 6345 log = 1; 6346 goto done; 6347 } 6348 if (pd.proto == IPPROTO_AH) 6349 off += (opt6.ip6e_len + 2) * 4; 6350 else 6351 off += (opt6.ip6e_len + 1) * 8; 6352 pd.proto = opt6.ip6e_nxt; 6353 /* goto the next header */ 6354 break; 6355 } 6356 default: 6357 terminal++; 6358 break; 6359 } 6360 } while (!terminal); 6361 6362 /* if there's no routing header, use unmodified mbuf for checksumming */ 6363 if (!n) 6364 n = m; 6365 6366 switch (pd.proto) { 6367 6368 case IPPROTO_TCP: { 6369 struct tcphdr th; 6370 6371 pd.hdr.tcp = &th; 6372 if (!pf_pull_hdr(m, off, &th, sizeof(th), 6373 &action, &reason, AF_INET6)) { 6374 log = action != PF_PASS; 6375 goto done; 6376 } 6377 pd.p_len = pd.tot_len - off - (th.th_off << 2); 6378 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 6379 if (action == PF_DROP) 6380 goto done; 6381 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 6382 &reason); 6383 if (action == PF_PASS) { 6384 pfsync_update_state(s); 6385 r = s->rule.ptr; 6386 a = s->anchor.ptr; 6387 log = s->log; 6388 } else if (s == NULL) 6389 action = pf_test_rule(&r, &s, dir, kif, 6390 m, off, h, &pd, &a, &ruleset, NULL, inp); 6391 break; 6392 } 6393 6394 case IPPROTO_UDP: { 6395 struct udphdr uh; 6396 6397 pd.hdr.udp = &uh; 6398 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 6399 &action, &reason, AF_INET6)) { 6400 log = action != PF_PASS; 6401 goto done; 6402 } 6403 if (uh.uh_dport == 0 || 6404 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 6405 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 6406 action = PF_DROP; 6407 REASON_SET(&reason, PFRES_SHORT); 6408 goto done; 6409 } 6410 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 6411 if (action == PF_PASS) { 6412 pfsync_update_state(s); 6413 r = s->rule.ptr; 6414 a = s->anchor.ptr; 6415 log = s->log; 6416 } else if (s == NULL) 6417 action = pf_test_rule(&r, &s, dir, kif, 6418 m, off, h, &pd, &a, &ruleset, NULL, inp); 6419 break; 6420 } 6421 6422 case IPPROTO_ICMPV6: { 6423 struct icmp6_hdr ih; 6424 6425 pd.hdr.icmp6 = &ih; 6426 if (!pf_pull_hdr(m, off, &ih, sizeof(ih), 6427 &action, &reason, AF_INET6)) { 6428 log = action != PF_PASS; 6429 goto done; 6430 } 6431 action = pf_test_state_icmp(&s, dir, kif, 6432 m, off, h, &pd, &reason); 6433 if (action == PF_PASS) { 6434 pfsync_update_state(s); 6435 r = s->rule.ptr; 6436 a = s->anchor.ptr; 6437 log = s->log; 6438 } else if (s == NULL) 6439 action = pf_test_rule(&r, &s, dir, kif, 6440 m, off, h, &pd, &a, &ruleset, NULL, inp); 6441 break; 6442 } 6443 6444 default: 6445 action = pf_test_state_other(&s, dir, kif, m, &pd); 6446 if (action == PF_PASS) { 6447 pfsync_update_state(s); 6448 r = s->rule.ptr; 6449 a = s->anchor.ptr; 6450 log = s->log; 6451 } else if (s == NULL) 6452 action = pf_test_rule(&r, &s, dir, kif, m, off, h, 6453 &pd, &a, &ruleset, NULL, inp); 6454 break; 6455 } 6456 6457 done: 6458 if (n != m) { 6459 m_freem(n); 6460 n = NULL; 6461 } 6462 6463 /* handle dangerous IPv6 extension headers. */ 6464 if (action == PF_PASS && rh_cnt && 6465 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 6466 action = PF_DROP; 6467 REASON_SET(&reason, PFRES_IPOPTIONS); 6468 log = 1; 6469 DPFPRINTF(PF_DEBUG_MISC, 6470 ("pf: dropping packet with dangerous v6 headers\n")); 6471 } 6472 6473 if ((s && s->tag) || r->rtableid) 6474 pf_tag_packet(m, s ? s->tag : 0, r->rtableid); 6475 6476 #if 0 6477 if (dir == PF_IN && s && s->key[PF_SK_STACK]) 6478 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK]; 6479 #endif 6480 6481 #ifdef ALTQ 6482 if (action == PF_PASS && r->qid) { 6483 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 6484 if (pd.tos & IPTOS_LOWDELAY) 6485 m->m_pkthdr.pf.qid = r->pqid; 6486 else 6487 m->m_pkthdr.pf.qid = r->qid; 6488 m->m_pkthdr.pf.ecn_af = AF_INET6; 6489 m->m_pkthdr.pf.hdr = h; 6490 if (s) { 6491 /* for fairq */ 6492 m->m_pkthdr.pf.state_hash = s->hash; 6493 m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED; 6494 } 6495 } 6496 #endif /* ALTQ */ 6497 6498 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 6499 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 6500 (s->nat_rule.ptr->action == PF_RDR || 6501 s->nat_rule.ptr->action == PF_BINAT) && 6502 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)) 6503 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST; 6504 6505 if (dir == PF_IN && action == PF_PASS && r->divert.port) { 6506 struct pf_divert *divert; 6507 6508 if ((divert = pf_get_divert(m))) { 6509 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED; 6510 divert->port = r->divert.port; 6511 divert->addr.ipv6 = r->divert.addr.v6; 6512 } 6513 } 6514 6515 if (log) { 6516 struct pf_rule *lr; 6517 6518 if (s != NULL && s->nat_rule.ptr != NULL && 6519 s->nat_rule.ptr->log & PF_LOG_ALL) 6520 lr = s->nat_rule.ptr; 6521 else 6522 lr = r; 6523 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset, 6524 &pd); 6525 } 6526 6527 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 6528 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++; 6529 6530 if (action == PF_PASS || r->action == PF_DROP) { 6531 dirndx = (dir == PF_OUT); 6532 r->packets[dirndx]++; 6533 r->bytes[dirndx] += pd.tot_len; 6534 if (a != NULL) { 6535 a->packets[dirndx]++; 6536 a->bytes[dirndx] += pd.tot_len; 6537 } 6538 if (s != NULL) { 6539 if (s->nat_rule.ptr != NULL) { 6540 s->nat_rule.ptr->packets[dirndx]++; 6541 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 6542 } 6543 if (s->src_node != NULL) { 6544 s->src_node->packets[dirndx]++; 6545 s->src_node->bytes[dirndx] += pd.tot_len; 6546 } 6547 if (s->nat_src_node != NULL) { 6548 s->nat_src_node->packets[dirndx]++; 6549 s->nat_src_node->bytes[dirndx] += pd.tot_len; 6550 } 6551 dirndx = (dir == s->direction) ? 0 : 1; 6552 s->packets[dirndx]++; 6553 s->bytes[dirndx] += pd.tot_len; 6554 } 6555 tr = r; 6556 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 6557 if (nr != NULL && r == &pf_default_rule) 6558 tr = nr; 6559 if (tr->src.addr.type == PF_ADDR_TABLE) 6560 pfr_update_stats(tr->src.addr.p.tbl, 6561 (s == NULL) ? pd.src : 6562 &s->key[(s->direction == PF_IN)]->addr[0], 6563 pd.af, pd.tot_len, dir == PF_OUT, 6564 r->action == PF_PASS, tr->src.neg); 6565 if (tr->dst.addr.type == PF_ADDR_TABLE) 6566 pfr_update_stats(tr->dst.addr.p.tbl, 6567 (s == NULL) ? pd.dst : 6568 &s->key[(s->direction == PF_IN)]->addr[1], 6569 pd.af, pd.tot_len, dir == PF_OUT, 6570 r->action == PF_PASS, tr->dst.neg); 6571 } 6572 6573 6574 if (action == PF_SYNPROXY_DROP) { 6575 m_freem(*m0); 6576 *m0 = NULL; 6577 action = PF_PASS; 6578 } else if (r->rt) 6579 /* pf_route6 can free the mbuf causing *m0 to become NULL */ 6580 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd); 6581 6582 return (action); 6583 } 6584 #endif /* INET6 */ 6585 6586 int 6587 pf_check_congestion(struct ifqueue *ifq) 6588 { 6589 return (0); 6590 } 6591