/*	$OpenBSD: pf.c,v 1.614 2008/08/02 12:34:37 henning Exp $ */

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <machine/inttypes.h>

#include <sys/md5.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/netisr2.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <netinet/if_ether.h>

#include <net/pf/pfvar.h>
#include <net/pf/if_pflog.h>

#include <net/pf/if_pfsync.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <sys/in_cksum.h>
#include <sys/ucred.h>
#include <machine/limits.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>

extern int ip_optcopy(struct ip *, struct ip *);
extern int debug_pfugidhack;

struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);

#define DPFPRINTF(n, x)	if (pf_status.debug >= (n)) kprintf x

/*
 * Global variables
 */

/* mask radix tree */
struct radix_node_head	*pf_maskhead;

/* state tables */
struct pf_state_tree	 pf_statetbl;

struct pf_altqqueue	 pf_altqs[2];
struct pf_palist	 pf_pabuf;
struct pf_altqqueue	*pf_altqs_active;
struct pf_altqqueue	*pf_altqs_inactive;
struct pf_status	 pf_status;

u_int32_t		 ticket_altqs_active;
u_int32_t		 ticket_altqs_inactive;
int			 altqs_inactive_open;
u_int32_t		 ticket_pabuf;

MD5_CTX			 pf_tcp_secret_ctx;
u_char			 pf_tcp_secret[16];
int			 pf_tcp_secret_init;
int			 pf_tcp_iss_off;

struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_rule		*r;
	struct pf_anchor_node	*parent;
	struct pf_anchor	*child;
} pf_anchor_stack[64];

struct malloc_type	*pf_src_tree_pl, *pf_rule_pl, *pf_pooladdr_pl;
struct malloc_type	*pf_state_pl, *pf_state_key_pl, *pf_state_item_pl;
struct malloc_type	*pf_altq_pl;

void			 pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

void			 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
void			 pf_add_threshold(struct pf_threshold *);
int			 pf_check_threshold(struct pf_threshold *);

void			 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
int			 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
#ifdef INET6
void			 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
#endif /* INET6 */
void			 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
void			 pf_send_tcp(const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ether_header *, struct ifnet *);
void			 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
struct pf_rule		*pf_match_translation(struct pf_pdesc *, struct mbuf *,
			    int, int, struct pfi_kif *,
			    struct pf_addr *, u_int16_t, struct pf_addr *,
			    u_int16_t, int);
struct pf_rule		*pf_get_translation(struct pf_pdesc *, struct mbuf *,
			    int, int, struct pfi_kif *, struct pf_src_node **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t);
void			 pf_detach_state(struct pf_state *);
int			 pf_state_key_setup(struct pf_pdesc *, struct pf_rule *,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t);
void			 pf_state_key_detach(struct pf_state *, int);
u_int32_t		 pf_tcp_iss(struct pf_pdesc *);
int			 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct ifqueue *,
			    struct inpcb *);
static __inline int	 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
int			 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
int			 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
int			 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
int			 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
int			 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
int			 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
int			 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
void			 pf_step_into_anchor(int *, struct pf_ruleset **, int,
			    struct pf_rule **, struct pf_rule **, int *);
int			 pf_step_out_of_anchor(int *, struct pf_ruleset **,
			    int, struct pf_rule **, struct pf_rule **,
			    int *);
void			 pf_hash(struct pf_addr *, struct pf_addr *,
			    struct pf_poolhashkey *, sa_family_t);
int			 pf_map_addr(u_int8_t, struct pf_rule *,
			    struct pf_addr *, struct pf_addr *,
			    struct pf_addr *, struct pf_src_node **);
int			 pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    struct pf_addr *, u_int16_t *, u_int16_t, u_int16_t,
			    struct pf_src_node **);
void			 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
void			 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
u_int16_t		 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
u_int16_t		 pf_calc_mss(struct pf_addr *, sa_family_t,
			    u_int16_t);
void			 pf_set_rt_ifp(struct pf_state *,
			    struct pf_addr *);
int			 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
struct pf_divert	*pf_get_divert(struct mbuf *);
void			 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
int			 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
struct pf_state		*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int, struct mbuf *);
int			 pf_src_connlimit(struct pf_state **);
int			 pf_check_congestion(struct ifqueue *);

extern int pf_end_threads;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl, PFSTATE_HIWAT },
	{ &pf_src_tree_pl, PFSNODE_HIWAT },
	{ &pf_frent_pl, PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl, PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl, PFR_KENTRY_HIWAT }
};

#define STATE_LOOKUP(i, k, d, s, m)					\
	do {								\
		s = pf_find_state(i, k, d, m);				\
		if (s == NULL || (s)->timeout == PFTM_PURGE)		\
			return (PF_DROP);				\
		if (d == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != i)					\
			return (PF_PASS);				\
	} while (0)
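/*
 * Note on STATE_LOOKUP (explanatory, summarizing the macro above): besides
 * dropping packets whose state is already marked PFTM_PURGE, outbound
 * packets matching a state bound to a route-to/reply-to interface other
 * than the current one are passed through unmodified here; they are
 * expected to be filtered again on the interface pf actually routes them
 * out of.
 */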
#define BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

#define STATE_INC_COUNTERS(s)				\
	do {						\
		s->rule.ptr->states_cur++;		\
		s->rule.ptr->states_tot++;		\
		if (s->anchor.ptr != NULL) {		\
			s->anchor.ptr->states_cur++;	\
			s->anchor.ptr->states_tot++;	\
		}					\
		if (s->nat_rule.ptr != NULL) {		\
			s->nat_rule.ptr->states_cur++;	\
			s->nat_rule.ptr->states_tot++;	\
		}					\
	} while (0)

#define STATE_DEC_COUNTERS(s)				\
	do {						\
		if (s->nat_rule.ptr != NULL)		\
			s->nat_rule.ptr->states_cur--;	\
		if (s->anchor.ptr != NULL)		\
			s->anchor.ptr->states_cur--;	\
		s->rule.ptr->states_cur--;		\
	} while (0)

static MALLOC_DEFINE(M_PFSTATEPL, "pfstatepl", "pf state pool list");
static MALLOC_DEFINE(M_PFSRCTREEPL, "pfsrctpl", "pf source tree pool list");
static MALLOC_DEFINE(M_PFSTATEKEYPL, "pfstatekeypl", "pf state key pool list");
static MALLOC_DEFINE(M_PFSTATEITEMPL, "pfstateitempl",
    "pf state item pool list");

static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
static __inline int pf_state_compare_key(struct pf_state_key *,
	struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
	struct pf_state *);

struct pf_src_tree tree_src_tracking;

struct pf_state_tree_id tree_id;
struct pf_state_queue state_list;

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
RB_GENERATE(pf_state_tree_id, pf_state,
    entry_id, pf_state_compare_id);

static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
	int	diff;

	if (a->rule.ptr > b->rule.ptr)
		return (1);
	if (a->rule.ptr < b->rule.ptr)
		return (-1);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr.addr32[3] > b->addr.addr32[3])
			return (1);
		if (a->addr.addr32[3] < b->addr.addr32[3])
			return (-1);
		if (a->addr.addr32[2] > b->addr.addr32[2])
			return (1);
		if (a->addr.addr32[2] < b->addr.addr32[2])
			return (-1);
		if (a->addr.addr32[1] > b->addr.addr32[1])
			return (1);
		if (a->addr.addr32[1] < b->addr.addr32[1])
			return (-1);
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}
	return (0);
}

u_int32_t
pf_state_hash(struct pf_state_key *sk)
{
	u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15));

	if (hv == 0)	/* disallow 0 */
		hv = 1;
	return (hv);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */
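/*
 * The source-tracking thresholds below are kept in fixed point: both the
 * limit and the running count are scaled by PF_THRESHOLD_MULT so that
 * pf_add_threshold() can decay the count proportionally to the time
 * elapsed in the measurement window using integer arithmetic only.  For
 * example, with "max-src-conn-rate 10/5", a count that sits idle for
 * roughly half of the 5 second window decays to roughly half its value.
 */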
void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_second;
}

void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_second, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
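/*
 * pf_src_connlimit() below enforces max-src-conn and max-src-conn-rate
 * for a state's source node.  When a limit is exceeded the offending
 * state is killed and, if the rule carries an overload table, the source
 * address is inserted into that table; with "flush" (or "flush global")
 * all states from that source created by the same rule (or by any rule)
 * are torn down as well.
 */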
int
pf_src_connlimit(struct pf_state **state)
{
	int bad = 0;

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	if ((*state)->rule.ptr->overload_tbl) {
		struct pfr_addr	p;
		u_int32_t	killed = 0;

		pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf_src_connlimit: blocking address ");
			pf_print_host(&(*state)->src_node->addr, 0,
			    (*state)->key[PF_SK_WIRE]->af);
		}

		bzero(&p, sizeof(p));
		p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
		switch ((*state)->key[PF_SK_WIRE]->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = (*state)->src_node->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = (*state)->src_node->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
		    &p, time_second);

		/* kill existing states if that's required. */
		if ((*state)->rule.ptr->flush) {
			struct pf_state_key *sk;
			struct pf_state *st;

			pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
			RB_FOREACH(st, pf_state_tree_id, &tree_id) {
				sk = st->key[PF_SK_WIRE];
				/*
				 * Kill states from this source.  (Only those
				 * from the same rule if PF_FLUSH_GLOBAL is
				 * not set)
				 */
				if (sk->af ==
				    (*state)->key[PF_SK_WIRE]->af &&
				    (((*state)->direction == PF_OUT &&
				    PF_AEQ(&(*state)->src_node->addr,
					&sk->addr[0], sk->af)) ||
				    ((*state)->direction == PF_IN &&
				    PF_AEQ(&(*state)->src_node->addr,
					&sk->addr[1], sk->af))) &&
				    ((*state)->rule.ptr->flush &
				    PF_FLUSH_GLOBAL ||
				    (*state)->rule.ptr == st->rule.ptr)) {
					st->timeout = PFTM_PURGE;
					st->src.state = st->dst.state =
					    TCPS_CLOSED;
					killed++;
				}
			}
			if (pf_status.debug >= PF_DEBUG_MISC)
				kprintf(", %u states killed", killed);
		}
		if (pf_status.debug >= PF_DEBUG_MISC)
			kprintf("\n");
	}

	/* kill this state */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
	return (1);
}

int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{
	struct pf_src_node	k;

	if (*sn == NULL) {
		k.af = af;
		PF_ACPY(&k.addr, src, af);
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = rule;
		else
			k.rule.ptr = NULL;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
	}
	if (*sn == NULL) {
		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = kmalloc(sizeof(struct pf_src_node),
			    M_PFSRCTREEPL, M_NOWAIT | M_ZERO);
		else
			pf_status.lcounters[LCNT_SRCNODES]++;
		if ((*sn) == NULL)
			return (-1);

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			(*sn)->rule.ptr = rule;
		else
			(*sn)->rule.ptr = NULL;
		PF_ACPY(&(*sn)->addr, src, af);
		if (RB_INSERT(pf_src_tree,
		    &tree_src_tracking, *sn) != NULL) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				kprintf("pf: src_tree insert failed: ");
				pf_print_host(&(*sn)->addr, 0, af);
				kprintf("\n");
			}
			kfree(*sn, M_PFSRCTREEPL);
			return (-1);
		}
		(*sn)->creation = time_second;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			(*sn)->rule.ptr->src_nodes++;
		pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		pf_status.src_nodes++;
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			pf_status.lcounters[LCNT_SRCSTATES]++;
			return (-1);
		}
	}
	return (0);
}

/* state table stuff */
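/*
 * pf_state_compare_key() below defines the sort order of the state-key
 * RB tree: protocol first, then address family, then both addresses,
 * then both ports.  Any total order works here; for IPv6 the low-order
 * address words are compared first, since those (typically the host
 * part) are the most likely to differ.
 */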
static __inline int
pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
{
	int	diff;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
			return (1);
		if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
			return (-1);
		if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
			return (1);
		if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
			return (-1);
		if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
			return (1);
		if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
			return (-1);
		if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
			return (1);
		if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
			return (-1);
		if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
			return (1);
		if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
			return (-1);
		if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
			return (1);
		if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
			return (-1);
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}

	if ((diff = a->port[0] - b->port[0]) != 0)
		return (diff);
	if ((diff = a->port[1] - b->port[1]) != 0)
		return (diff);

	return (0);
}

static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
	if (a->id > b->id)
		return (1);
	if (a->id < b->id)
		return (-1);
	if (a->creatorid > b->creatorid)
		return (1);
	if (a->creatorid < b->creatorid)
		return (-1);

	return (0);
}
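/*
 * Each state carries up to two keys: key[PF_SK_WIRE] describes the
 * addresses/ports as seen on the wire and key[PF_SK_STACK] as seen by
 * the local network stack.  The two only differ when a translation
 * (NAT/RDR/BINAT) rule applies; otherwise both slots point at the same
 * pf_state_key.  pf_state_key_attach() below links a state into the
 * global table under one of these keys, merging with an existing key
 * when another state already uses the identical tuple.
 */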
int
pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
{
	struct pf_state_item	*si;
	struct pf_state_key	*cur;

	KKASSERT(s->key[idx] == NULL);	/* XXX handle this? */

	if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
		/* key exists. check for same kif, if none, add to key */
		TAILQ_FOREACH(si, &cur->states, entry)
			if (si->s->kif == s->kif &&
			    si->s->direction == s->direction) {
				if (pf_status.debug >= PF_DEBUG_MISC) {
					kprintf(
					    "pf: %s key attach failed on %s: ",
					    (idx == PF_SK_WIRE) ?
					    "wire" : "stack",
					    s->kif->pfik_name);
					pf_print_state_parts(s,
					    (idx == PF_SK_WIRE) ? sk : NULL,
					    (idx == PF_SK_STACK) ? sk : NULL);
					kprintf("\n");
				}
				kfree(sk, M_PFSTATEKEYPL);
				return (-1);	/* collision! */
			}
		kfree(sk, M_PFSTATEKEYPL);

		s->key[idx] = cur;
	} else
		s->key[idx] = sk;

	if ((si = kmalloc(sizeof(struct pf_state_item),
	    M_PFSTATEITEMPL, M_NOWAIT)) == NULL) {
		pf_state_key_detach(s, idx);
		return (-1);
	}
	si->s = s;

	/* list is sorted, if-bound states before floating */
	if (s->kif == pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
	return (0);
}

void
pf_detach_state(struct pf_state *s)
{
	if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
		s->key[PF_SK_WIRE] = NULL;

	if (s->key[PF_SK_STACK] != NULL)
		pf_state_key_detach(s, PF_SK_STACK);

	if (s->key[PF_SK_WIRE] != NULL)
		pf_state_key_detach(s, PF_SK_WIRE);
}

void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_item	*si;

	si = TAILQ_FIRST(&s->key[idx]->states);
	while (si && si->s != s)
		si = TAILQ_NEXT(si, entry);

	if (si) {
		TAILQ_REMOVE(&s->key[idx]->states, si, entry);
		kfree(si, M_PFSTATEITEMPL);
	}

	if (TAILQ_EMPTY(&s->key[idx]->states)) {
		RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]);
		if (s->key[idx]->reverse)
			s->key[idx]->reverse->reverse = NULL;
		if (s->key[idx]->inp)
			s->key[idx]->inp->inp_pf_sk = NULL;
		kfree(s->key[idx], M_PFSTATEKEYPL);
	}
	s->key[idx] = NULL;
}

struct pf_state_key *
pf_alloc_state_key(int pool_flags)
{
	struct pf_state_key	*sk;

	if ((sk = kmalloc(sizeof(struct pf_state_key),
	    M_PFSTATEKEYPL, pool_flags)) == NULL)
		return (NULL);
	TAILQ_INIT(&sk->states);

	return (sk);
}

int
pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
    struct pf_state_key **skw, struct pf_state_key **sks,
    struct pf_state_key **skp, struct pf_state_key **nkp,
    struct pf_addr *saddr, struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport)
{
	KKASSERT((*skp == NULL && *nkp == NULL));

	if ((*skp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
	(*skp)->port[pd->sidx] = sport;
	(*skp)->port[pd->didx] = dport;
	(*skp)->proto = pd->proto;
	(*skp)->af = pd->af;

	if (nr != NULL) {
		if ((*nkp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
			return (ENOMEM); /* caller must handle cleanup */

		/* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
		PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
		PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
		(*nkp)->port[0] = (*skp)->port[0];
		(*nkp)->port[1] = (*skp)->port[1];
		(*nkp)->proto = pd->proto;
		(*nkp)->af = pd->af;
	} else
		*nkp = *skp;

	if (pd->dir == PF_IN) {
		*skw = *skp;
		*sks = *nkp;
	} else {
		*sks = *skp;
		*skw = *nkp;
	}
	return (0);
}
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	s->kif = kif;

	if (skw == sks) {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE))
			return (-1);
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
	} else {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
			kfree(sks, M_PFSTATEKEYPL);
			return (-1);
		}
		if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
			pf_state_key_detach(s, PF_SK_WIRE);
			return (-1);
		}
	}

	if (s->id == 0 && s->creatorid == 0) {
		s->id = htobe64(pf_status.stateid++);
		s->creatorid = pf_status.hostid;
	}

	/*
	 * Calculate hash code for altq
	 */
	s->hash = crc32(s->key[PF_SK_WIRE], sizeof(*sks));

	if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf: state insert failed: "
			    "id: %016jx creatorid: %08x",
			    (uintmax_t)be64toh(s->id), ntohl(s->creatorid));
			if (s->sync_flags & PFSTATE_FROMSYNC)
				kprintf(" (from sync)");
			kprintf("\n");
		}
		pf_detach_state(s);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&state_list, s, entry_list);
	pf_status.fcounters[FCNT_STATE_INSERT]++;
	pf_status.states++;
	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
	pfsync_insert_state(s);
	return (0);
}

struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
}

struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
    struct mbuf *m)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
	    ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse)
		sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
	else {
		if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
		    (struct pf_state_key *)key)) == NULL)
			return (NULL);
		if (dir == PF_OUT && m->m_pkthdr.pf.statekey) {
			((struct pf_state_key *)
			    m->m_pkthdr.pf.statekey)->reverse = sk;
			sk->reverse = m->m_pkthdr.pf.statekey;
		}
	}

	if (dir == PF_OUT)
		m->m_pkthdr.pf.statekey = NULL;

	/* list is sorted, if-bound states before floating ones */
	TAILQ_FOREACH(si, &sk->states, entry)
		if ((si->s->kif == pfi_all || si->s->kif == kif) &&
		    sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
		    si->s->key[PF_SK_STACK]))
			return (si->s);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si, *ret = NULL;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);

	if (sk != NULL) {
		TAILQ_FOREACH(si, &sk->states, entry)
			if (dir == PF_INOUT ||
			    (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
			    si->s->key[PF_SK_STACK]))) {
				if (more == NULL)
					return (si->s);

				if (ret)
					(*more)++;
				else
					ret = si;
			}
	}
	return (ret ? ret->s : NULL);
}
/* END state table stuff */

void
pf_purge_thread(void *v)
{
	int nloops = 0;
	int locked = 0;

	lwkt_gettoken(&pf_token);
	for (;;) {
		tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);

		lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);

		if (pf_end_threads) {
			pf_purge_expired_states(pf_status.states, 0);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes(1);
			pf_end_threads++;

			lockmgr(&pf_consistency_lock, LK_RELEASE);
			wakeup(pf_purge_thread);
			kthread_exit();
		}
		crit_enter();

		/* process a fraction of the state table every second */
		if (!pf_purge_expired_states(1 + (pf_status.states /
		    pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
			pf_purge_expired_states(1 + (pf_status.states /
			    pf_default_rule.timeout[PFTM_INTERVAL]), 1);
		}

		/* purge other expired types every PFTM_INTERVAL seconds */
		if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
			pf_purge_expired_fragments();
			if (!pf_purge_expired_src_nodes(locked)) {
				pf_purge_expired_src_nodes(1);
			}
			nloops = 0;
		}
		crit_exit();
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	}
	lwkt_reltoken(&pf_token);
}
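/*
 * pf_state_expires() below implements adaptive timeouts: once the number
 * of states passes adaptive.start, the configured timeout is scaled down
 * linearly by (end - states) / (end - start), and states at or beyond
 * adaptive.end expire immediately.  For example, with start=6000,
 * end=12000 and a 3600s timeout, at 9000 states a state lives
 * 3600 * (12000 - 9000) / (12000 - 6000) = 1800 seconds.
 */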
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_second);
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	KKASSERT(state->timeout != PFTM_UNLINKED);
	KKASSERT(state->timeout < PFTM_MAX);
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states_cur;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_second);
	}
	return (state->expire + timeout);
}

int
pf_purge_expired_src_nodes(int waslocked)
{
	struct pf_src_node	*cur, *next;
	int			 locked = waslocked;

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

		if (cur->states <= 0 && cur->expire <= time_second) {
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				next = RB_NEXT(pf_src_tree,
				    &tree_src_tracking, cur);
				locked = 1;
			}
			if (cur->rule.ptr != NULL) {
				cur->rule.ptr->src_nodes--;
				if (cur->rule.ptr->states_cur <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0)
					pf_rm_rule(NULL, cur->rule.ptr);
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
			pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			pf_status.src_nodes--;
			kfree(cur, M_PFSRCTREEPL);
		}
	}

	if (locked && !waslocked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}

void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			--s->src_node->conn;
		if (--s->src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_second + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_second + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/* callers should be at crit_enter() */
void
pf_unlink_state(struct pf_state *cur)
{
	if (cur->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
		    &cur->key[PF_SK_WIRE]->addr[1],
		    &cur->key[PF_SK_WIRE]->addr[0],
		    cur->key[PF_SK_WIRE]->port[1],
		    cur->key[PF_SK_WIRE]->port[0],
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}
	RB_REMOVE(pf_state_tree_id, &tree_id, cur);
	if (cur->creatorid == pf_status.hostid)
		pfsync_delete_state(cur);
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur);
}
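/*
 * purge_cur below is the expiry scan's persistent position in
 * state_list.  pf_purge_expired_states() only walks a fraction of the
 * list per invocation, so the cursor must survive between calls, and
 * pf_free_state() bumps it forward if the state it points at is freed
 * out from under the scan.
 */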
static struct pf_state *purge_cur;

/*
 * callers should be at crit_enter() and hold the
 * write_lock on pf_consistency_lock
 */
void
pf_free_state(struct pf_state *cur)
{
	if (pfsyncif != NULL &&
	    (pfsyncif->sc_bulk_send_next == cur ||
	    pfsyncif->sc_bulk_terminator == cur))
		return;
	KKASSERT(cur->timeout == PFTM_UNLINKED);
	if (--cur->rule.ptr->states_cur <= 0 &&
	    cur->rule.ptr->src_nodes <= 0)
		pf_rm_rule(NULL, cur->rule.ptr);
	if (cur->nat_rule.ptr != NULL)
		if (--cur->nat_rule.ptr->states_cur <= 0 &&
		    cur->nat_rule.ptr->src_nodes <= 0)
			pf_rm_rule(NULL, cur->nat_rule.ptr);
	if (cur->anchor.ptr != NULL)
		if (--cur->anchor.ptr->states_cur <= 0)
			pf_rm_rule(NULL, cur->anchor.ptr);
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);

	/*
	 * We may be freeing pf_purge_expired_states()'s saved scan entry,
	 * adjust it if necessary.
	 */
	if (purge_cur == cur) {
		kprintf("PURGE CONFLICT\n");
		purge_cur = TAILQ_NEXT(purge_cur, entry_list);
	}
	TAILQ_REMOVE(&state_list, cur, entry_list);
	if (cur->tag)
		pf_tag_unref(cur->tag);
	kfree(cur, M_PFSTATEPL);
	pf_status.fcounters[FCNT_STATE_REMOVALS]++;
	pf_status.states--;
}

int
pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
{
	struct pf_state		*cur;
	int			 locked = waslocked;

	while (maxcheck--) {
		/*
		 * Wrap to start of list when we hit the end
		 */
		cur = purge_cur;
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list);
			if (cur == NULL)
				break;	/* list empty */
		}

		/*
		 * Setup next (purge_cur) while we process this one.  If
		 * we block and something else deletes purge_cur,
		 * pf_free_state() will adjust it further ahead.
		 */
		purge_cur = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			/* free unlinked state */
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				locked = 1;
			}
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= time_second) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			if (!locked) {
				if (!lockmgr(&pf_consistency_lock,
				    LK_EXCLUSIVE))
					return (0);
				locked = 1;
			}
			pf_free_state(cur);
		}
	}

	if (locked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}

int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE)
		return (0);
	if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
		return (1);
	return (0);
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
		return;
	pfr_detach_table(aw->p.tbl);
	aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt = aw->p.tbl;

	if (aw->type != PF_ADDR_TABLE || kt == NULL)
		return;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}
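/*
 * pf_print_host() below prints an address in the usual dotted-quad or
 * IPv6 hex notation; for IPv6 it searches for the longest run of zero
 * 16-bit groups and collapses it to "::", and ports are printed as
 * ":port" for IPv4 but "[port]" for IPv6 to avoid ambiguity with the
 * colons of the address itself.
 */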
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);

		kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			kprintf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart = 255, curend = 0,
		    maxstart = 0, maxend = 0;

		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				else
					curend = i;
			} else {
				if (curstart) {
					if ((curend - curstart) >
					    (maxend - maxstart)) {
						maxstart = curstart;
						maxend = curend;
						curstart = 255;
					}
				}
			}
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (maxend != 7) {
					if (i == maxstart)
						kprintf(":");
				} else {
					if (i == maxend)
						kprintf(":");
				}
			} else {
				b = ntohs(addr->addr16[i]);
				kprintf("%x", b);
				if (i < 7)
					kprintf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			kprintf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}

void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_TCP:
		kprintf("TCP ");
		break;
	case IPPROTO_UDP:
		kprintf("UDP ");
		break;
	case IPPROTO_ICMP:
		kprintf("ICMP ");
		break;
	case IPPROTO_ICMPV6:
		kprintf("ICMPV6 ");
		break;
	default:
		kprintf("%u ", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		kprintf(" in");
		break;
	case PF_OUT:
		kprintf(" out");
		break;
	}
	if (skw) {
		kprintf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		kprintf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		kprintf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			kprintf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			kprintf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			kprintf("]");
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			kprintf("]");
		}
		kprintf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		kprintf(" ");
	if (f & TH_FIN)
		kprintf("F");
	if (f & TH_SYN)
		kprintf("S");
	if (f & TH_RST)
		kprintf("R");
	if (f & TH_PUSH)
		kprintf("P");
	if (f & TH_ACK)
		kprintf("A");
	if (f & TH_URG)
		kprintf("U");
	if (f & TH_ECE)
		kprintf("E");
	if (f & TH_CWR)
		kprintf("W");
}
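/*
 * Skip steps accelerate linear ruleset evaluation.  For each of the
 * PF_SKIP_COUNT criteria (interface, direction, af, proto, src/dst
 * address and port), consecutive rules with an identical value are
 * chained together, so that when the criterion fails to match, the
 * evaluator can jump directly past the whole run instead of testing
 * every rule in it.  pf_calc_skip_steps() below rebuilds these chains
 * whenever a ruleset is loaded.
 */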
#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	case PF_ADDR_RTLABEL:
		return (aw1->v.rtlabel != aw2->v.rtlabel);
	default:
		kprintf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
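/*
 * pf_cksum_fixup() above applies the incremental internet checksum
 * update of RFC 1624: when a 16-bit word changes from 'old' to 'new',
 * the ones-complement sum can be patched with cksum + old - new (folded
 * back into 16 bits) instead of recomputing it over the whole packet.
 * The 'udp' flag preserves the UDP convention that an all-zero checksum
 * means "no checksum": 0 stays 0, and a computed result of 0 is written
 * as 0xffff.  A hypothetical example, rewriting a port from 80 to 8080:
 *
 *	th->th_sum = pf_cksum_fixup(th->th_sum, htons(80), htons(8080), 0);
 */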
void
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*p = pn;
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u),
		    po, pn, u);
		break;
#endif /* INET6 */
	}
}

/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

#ifdef INET6
void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc = 0;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}

/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct raw_sackblock sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(&sack.rblk_start,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.rblk_end,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, opts);
	return (copyback);
}
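/*
 * pf_send_tcp() below fabricates a minimal TCP segment (optionally with
 * an MSS option) and injects it with ip_output()/ip6_output().  It is
 * used, for instance, to send the RST+ACK that tears down a peer when a
 * state is unlinked, and during the synproxy handshake.
 */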
void
pf_send_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
{
	struct mbuf	*m;
	int		 len = 0, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th = NULL;
	char		*opt;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	}

	/*
	 * Create outgoing mbuf.
	 *
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	m = m_gethdr(MB_DONTWAIT, MT_HEADER);
	if (m == NULL) {
		return;
	}
	if (tag)
		m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m->m_pkthdr.pf.flags = 0;
	m->m_pkthdr.pf.tag = rtag;
	/* XXX Recheck when upgrading to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;
	if (r != NULL && r->rtableid >= 0)
		m->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r != NULL && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = af;
		m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = tlen;
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		mss = htons(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_len = len;
		h->ip_off = path_mtu_discovery ? IP_DF : 0;
		h->ip_ttl = ttl ? ttl : ip_defttl;
		h->ip_sum = 0;
		if (eh == NULL) {
			lwkt_reltoken(&pf_token);
			ip_output(m, NULL, NULL, 0, NULL, NULL);
			lwkt_gettoken(&pf_token);
		} else {
			struct route		 ro;
			struct rtentry		 rt;
			struct ether_header	*e = (void *)ro.ro_dst.sa_data;

			if (ifp == NULL) {
				m_freem(m);
				return;
			}
			rt.rt_ifp = ifp;
			ro.ro_rt = &rt;
			ro.ro_dst.sa_len = sizeof(ro.ro_dst);
			ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
			bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
			bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
			e->ether_type = eh->ether_type;
			/* XXX_IMPORT: later */
			lwkt_reltoken(&pf_token);
			ip_output(m, NULL, &ro, 0, NULL, NULL);
			lwkt_gettoken(&pf_token);
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		lwkt_reltoken(&pf_token);
		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
		lwkt_gettoken(&pf_token);
		break;
#endif /* INET6 */
	}
}
void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct mbuf	*m0;

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
		return;

	m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m0->m_pkthdr.pf.flags = 0;
	/* XXX Re-Check when Upgrading to > 4.4 */
	m0->m_pkthdr.pf.statekey = NULL;

	if (r->rtableid >= 0)
		m0->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r->qid) {
		m0->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m0->m_pkthdr.pf.qid = r->qid;
		m0->m_pkthdr.pf.ecn_af = af;
		m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
#endif /* INET6 */
	}
}

/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		     (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		     (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		     (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		     (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}
/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))
			return (0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		int	i;

		/* check a >= b */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
				break;
			else if (a->addr32[i] < b->addr32[i])
				return (0);
		/* check a <= e */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
				break;
			else if (a->addr32[i] > e->addr32[i])
				return (0);
		break;
	}
#endif /* INET6 */
	}
	return (1);
}

int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return ((p > a1) && (p < a2));
	case PF_OP_XRG:
		return ((p < a1) || (p > a2));
	case PF_OP_RRG:
		return ((p >= a1) && (p <= a2));
	case PF_OP_EQ:
		return (p == a1);
	case PF_OP_NE:
		return (p != a1);
	case PF_OP_LT:
		return (p < a1);
	case PF_OP_LE:
		return (p <= a1);
	case PF_OP_GT:
		return (p > a1);
	case PF_OP_GE:
		return (p >= a1);
	}
	return (0); /* never reached */
}

int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
	a1 = ntohs(a1);
	a2 = ntohs(a2);
	p = ntohs(p);
	return (pf_match(op, a1, a2, p));
}

int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, u));
}

int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, g));
}

int
pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
{
	if (*tag == -1)
		*tag = m->m_pkthdr.pf.tag;

	return ((!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag));
}

int
pf_tag_packet(struct mbuf *m, int tag, int rtableid)
{
	if (tag <= 0 && rtableid < 0)
		return (0);

	if (tag > 0)
		m->m_pkthdr.pf.tag = tag;
	if (rtableid >= 0)
		m->m_pkthdr.pf.rtableid = rtableid;

	return (0);
}

void
pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe	*f;

	(*r)->anchor->match = 0;
	if (match)
		*match = 0;
	if (*depth >= NELEM(pf_anchor_stack)) {
		kprintf("pf_step_into_anchor: stack overflow\n");
		*r = TAILQ_NEXT(*r, entries);
		return;
	} else if (*depth == 0 && a != NULL)
		*a = *r;
	f = pf_anchor_stack + (*depth)++;
	f->rs = *rs;
	f->r = *r;
	if ((*r)->anchor_wildcard) {
		f->parent = &(*r)->anchor->children;
		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
		    NULL) {
			*r = NULL;
			return;
		}
		*rs = &f->child->ruleset;
	} else {
		f->parent = NULL;
		f->child = NULL;
		*rs = &(*r)->anchor->ruleset;
	}
	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
}
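/*
 * pf_step_into_anchor() above and pf_step_out_of_anchor() below walk
 * nested anchor rulesets iteratively using the fixed-size
 * pf_anchor_stack (64 frames deep) instead of recursion.  A wildcard
 * anchor ("foo/*") additionally iterates over all children of the
 * anchor via the parent/child fields of the stack frame.
 */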

int
pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe *f;
	int quick = 0;

	do {
		if (*depth <= 0)
			break;
		f = pf_anchor_stack + *depth - 1;
		if (f->parent != NULL && f->child != NULL) {
			if (f->child->match ||
			    (match != NULL && *match)) {
				f->r->anchor->match = 1;
				if (match != NULL)
					*match = 0;
			}
			f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
			if (f->child != NULL) {
				*rs = &f->child->ruleset;
				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
				if (*r == NULL)
					continue;
				else
					break;
			}
		}
		(*depth)--;
		if (*depth == 0 && a != NULL)
			*a = NULL;
		*rs = f->rs;
		if (f->r->anchor->match || (match != NULL && *match))
			quick = f->r->quick;
		*r = TAILQ_NEXT(f->r, entries);
	} while (*r == NULL);

	return (quick);
}

#ifdef INET6
void
pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		break;
#endif /* INET */
	case AF_INET6:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
		    ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
		    ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
		    ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
		break;
	}
}

void
pf_addr_inc(struct pf_addr *addr, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
		break;
#endif /* INET */
	case AF_INET6:
		if (addr->addr32[3] == 0xffffffff) {
			addr->addr32[3] = 0;
			if (addr->addr32[2] == 0xffffffff) {
				addr->addr32[2] = 0;
				if (addr->addr32[1] == 0xffffffff) {
					addr->addr32[1] = 0;
					addr->addr32[0] =
					    htonl(ntohl(addr->addr32[0]) + 1);
				} else
					addr->addr32[1] =
					    htonl(ntohl(addr->addr32[1]) + 1);
			} else
				addr->addr32[2] =
				    htonl(ntohl(addr->addr32[2]) + 1);
		} else
			addr->addr32[3] =
			    htonl(ntohl(addr->addr32[3]) + 1);
		break;
	}
}
#endif /* INET6 */
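
/*
 * The mix() macro below is, as far as can be told, the 96-bit mixing
 * step of Bob Jenkins' public domain lookup hash, seeded with the
 * golden ratio constant 0x9e3779b9; pf_hash() folds an address and the
 * pool key through it so a given source address always hashes to the
 * same pool slot.
 */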

#define mix(a,b,c) \
	do {					\
		a -= b; a -= c; a ^= (c >> 13);	\
		b -= c; b -= a; b ^= (a << 8);	\
		c -= a; c -= b; c ^= (b >> 13);	\
		a -= b; a -= c; a ^= (c >> 12);	\
		b -= c; b -= a; b ^= (a << 16);	\
		c -= a; c -= b; c ^= (b >> 5);	\
		a -= b; a -= c; a ^= (c >> 3);	\
		b -= c; b -= a; b ^= (a << 10);	\
		c -= a; c -= b; c ^= (b >> 15);	\
	} while (0)

/*
 * hash function based on bridge_hash in if_bridge.c
 */
void
pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
    struct pf_poolhashkey *key, sa_family_t af)
{
	u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];

	switch (af) {
#ifdef INET
	case AF_INET:
		a += inaddr->addr32[0];
		b += key->key32[1];
		mix(a, b, c);
		hash->addr32[0] = c + key->key32[2];
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		a += inaddr->addr32[0];
		b += inaddr->addr32[2];
		mix(a, b, c);
		hash->addr32[0] = c;
		a += inaddr->addr32[1];
		b += inaddr->addr32[3];
		c += key->key32[1];
		mix(a, b, c);
		hash->addr32[1] = c;
		a += inaddr->addr32[2];
		b += inaddr->addr32[1];
		c += key->key32[2];
		mix(a, b, c);
		hash->addr32[2] = c;
		a += inaddr->addr32[3];
		b += inaddr->addr32[0];
		c += key->key32[3];
		mix(a, b, c);
		hash->addr32[3] = c;
		break;
#endif /* INET6 */
	}
}

int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
	unsigned char hash[16];
	struct pf_pool *rpool = &r->rpool;
	struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
	struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
	struct pf_pooladdr *acur = rpool->cur;
	struct pf_src_node k;

	if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
	    (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		k.af = af;
		PF_ACPY(&k.addr, saddr, af);
		if (r->rule_flag & PFRULE_RULESRCTRACK ||
		    r->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = r;
		else
			k.rule.ptr = NULL;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
		if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
			PF_ACPY(naddr, &(*sn)->raddr, af);
			if (pf_status.debug >= PF_DEBUG_MISC) {
				kprintf("pf_map_addr: src tracking maps ");
				pf_print_host(&k.addr, 0, af);
				kprintf(" to ");
				pf_print_host(naddr, 0, af);
				kprintf("\n");
			}
			return (0);
		}
	}

	if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
		return (1);
	if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
		switch (af) {
#ifdef INET
		case AF_INET:
			if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
			break;
#endif /* INET6 */
		}
	} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
			return (1);	/* unsupported */
	} else {
		raddr = &rpool->cur->addr.v.a.addr;
		rmask = &rpool->cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, af);
		break;
	case PF_POOL_BITMASK:
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, af)) {
			switch (af) {
#ifdef INET
			case AF_INET:
				rpool->counter.addr32[0] =
				    htonl(karc4random());
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff)
					rpool->counter.addr32[3] =
					    htonl(karc4random());
				else
					break;
				if (rmask->addr32[2] != 0xffffffff)
					rpool->counter.addr32[2] =
					    htonl(karc4random());
				else
					break;
				if (rmask->addr32[1] != 0xffffffff)
					rpool->counter.addr32[1] =
					    htonl(karc4random());
				else
					break;
				if (rmask->addr32[0] != 0xffffffff)
					rpool->counter.addr32[0] =
					    htonl(karc4random());
				break;
#endif /* INET6 */
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
			PF_ACPY(init_addr, naddr, af);

		} else {
			PF_AINC(&rpool->counter, af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
		}
		break;
	case PF_POOL_SRCHASH:
		pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
		break;
	case PF_POOL_ROUNDROBIN:
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af))
				goto get_addr;
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af))
				goto get_addr;
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
			goto get_addr;

	try_next:
		if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
			rpool->cur = TAILQ_FIRST(&rpool->list);
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else {
			raddr = &rpool->cur->addr.v.a.addr;
			rmask = &rpool->cur->addr.v.a.mask;
			PF_ACPY(&rpool->counter, raddr, af);
		}

	get_addr:
		PF_ACPY(naddr, &rpool->counter, af);
		if (init_addr != NULL && PF_AZERO(init_addr, af))
			PF_ACPY(init_addr, naddr, af);
		PF_AINC(&rpool->counter, af);
		break;
	}
	if (*sn != NULL)
		PF_ACPY(&(*sn)->raddr, naddr, af);

	if (pf_status.debug >= PF_DEBUG_MISC &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		kprintf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, af);
		kprintf("\n");
	}

	return (0);
}
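
/*
 * pf_get_sport() below picks the translated source port for a NAT
 * mapping: after pf_map_addr() has chosen the address, it probes for a
 * free port starting at a random offset within [low, high], scanning
 * upwards and then downwards, so consecutive connections do not get
 * predictable ports; if the range is exhausted it asks pf_map_addr()
 * for the next pool address and tries again.
 */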
int
pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
    struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
    struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
    struct pf_src_node **sn)
{
	struct pf_state_key_cmp key;
	struct pf_addr init_addr;
	u_int16_t cut;

	bzero(&init_addr, sizeof(init_addr));
	if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
		return (1);

	if (proto == IPPROTO_ICMP) {
		low = 1;
		high = 65535;
	}

	do {
		key.af = af;
		key.proto = proto;
		PF_ACPY(&key.addr[1], daddr, key.af);
		PF_ACPY(&key.addr[0], naddr, key.af);
		key.port[1] = dport;

		/*
		 * port search; start random, step;
		 * similar to the portloop in in_pcbbind
		 */
		if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
		    proto == IPPROTO_ICMP)) {
			key.port[0] = dport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
				return (0);
		} else if (low == 0 && high == 0) {
			key.port[0] = *nport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
				return (0);
		} else if (low == high) {
			key.port[0] = htons(low);
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				*nport = htons(low);
				return (0);
			}
		} else {
			u_int16_t tmp;

			if (low > high) {
				tmp = low;
				low = high;
				high = tmp;
			}
			/* low < high */
			cut = htonl(karc4random()) % (1 + high - low) + low;
			/* low <= cut <= high */
			for (tmp = cut; tmp <= high; ++(tmp)) {
				key.port[0] = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL && !in_baddynamic(tmp, proto)) {
					*nport = htons(tmp);
					return (0);
				}
			}
			for (tmp = cut - 1; tmp >= low; --(tmp)) {
				key.port[0] = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL && !in_baddynamic(tmp, proto)) {
					*nport = htons(tmp);
					return (0);
				}
			}
		}

		switch (r->rpool.opts & PF_POOL_TYPEMASK) {
		case PF_POOL_RANDOM:
		case PF_POOL_ROUNDROBIN:
			if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
				return (1);
			break;
		case PF_POOL_NONE:
		case PF_POOL_SRCHASH:
		case PF_POOL_BITMASK:
		default:
			return (1);
		}
	} while (!PF_AEQ(&init_addr, naddr, af));
	return (1);	/* none available */
}

struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
    int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
    struct pf_addr *daddr, u_int16_t dport, int rs_num)
{
	struct pf_rule *r, *rm = NULL;
	struct pf_ruleset *ruleset = NULL;
	int tag = -1;
	int rtableid = -1;
	int asd = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
	while (r && rm == NULL) {
		struct pf_rule_addr *src = NULL, *dst = NULL;
		struct pf_addr_wrap *xdst = NULL;

		if (r->action == PF_BINAT && direction == PF_IN) {
			src = &r->dst;
			if (r->rpool.cur != NULL)
				xdst = &r->rpool.cur->addr;
		} else {
			src = &r->src;
			dst = &r->dst;
		}

		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != pd->af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
		    src->neg, kif))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
			    PF_SKIP_DST_ADDR].ptr;
		else if (src->port_op && !pf_match_port(src->port_op,
		    src->port[0], src->port[1], sport))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
			    PF_SKIP_DST_PORT].ptr;
		else if (dst != NULL &&
		    PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
		    0, NULL))
			r = TAILQ_NEXT(r, entries);
		else if (dst != NULL && dst->port_op &&
		    !pf_match_port(dst->port_op, dst->port[0],
		    dst->port[1], dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
		    IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
		    off, pd->hdr.tcp), r->os_fingerprint)))
			r = TAILQ_NEXT(r, entries);
		else {
			if (r->tag)
				tag = r->tag;
			if (r->rtableid >= 0)
				rtableid = r->rtableid;
			if (r->anchor == NULL) {
				rm = r;
			} else
				pf_step_into_anchor(&asd, &ruleset, rs_num,
				    &r, NULL, NULL);
		}
		if (r == NULL)
			pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
			    NULL, NULL);
	}
	if (pf_tag_packet(m, tag, rtableid))
		return (NULL);
	if (rm != NULL && (rm->action == PF_NONAT ||
	    rm->action == PF_NORDR || rm->action == PF_NOBINAT))
		return (NULL);
	return (rm);
}
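
/*
 * pf_get_translation() below decides which translation ruleset applies:
 * outbound packets try BINAT first, then NAT; inbound packets try RDR
 * first, then BINAT.  "no nat"/"no binat"/"no rdr" rules match here as
 * well but make the function return NULL, which exempts the packet from
 * translation.
 */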
struct pf_rule *
pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
    struct pfi_kif *kif, struct pf_src_node **sn,
    struct pf_state_key **skw, struct pf_state_key **sks,
    struct pf_state_key **skp, struct pf_state_key **nkp,
    struct pf_addr *saddr, struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport)
{
	struct pf_rule *r = NULL;

	if (direction == PF_OUT) {
		r = pf_match_translation(pd, m, off, direction, kif, saddr,
		    sport, daddr, dport, PF_RULESET_BINAT);
		if (r == NULL)
			r = pf_match_translation(pd, m, off, direction, kif,
			    saddr, sport, daddr, dport, PF_RULESET_NAT);
	} else {
		r = pf_match_translation(pd, m, off, direction, kif, saddr,
		    sport, daddr, dport, PF_RULESET_RDR);
		if (r == NULL)
			r = pf_match_translation(pd, m, off, direction, kif,
			    saddr, sport, daddr, dport, PF_RULESET_BINAT);
	}

	if (r != NULL) {
		struct pf_addr *naddr;
		u_int16_t *nport;

		if (pf_state_key_setup(pd, r, skw, sks, skp, nkp,
		    saddr, daddr, sport, dport))
			return r;

		/* XXX We only modify one side for now. */
		naddr = &(*nkp)->addr[1];
		nport = &(*nkp)->port[1];

		/*
		 * NOTE: Currently all translations will clear
		 *	 BRIDGE_MBUF_TAGGED, telling the bridge to
		 *	 ignore the original input encapsulation.
		 */
		switch (r->action) {
		case PF_NONAT:
		case PF_NOBINAT:
		case PF_NORDR:
			return (NULL);
		case PF_NAT:
			m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
			if (pf_get_sport(pd->af, pd->proto, r, saddr,
			    daddr, dport, naddr, nport, r->rpool.proxy_port[0],
			    r->rpool.proxy_port[1], sn)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: NAT proxy port allocation "
				    "(%u-%u) failed\n",
				    r->rpool.proxy_port[0],
				    r->rpool.proxy_port[1]));
				return (NULL);
			}
			break;
		case PF_BINAT:
			m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
			switch (direction) {
			case PF_OUT:
				if (r->rpool.cur->addr.type ==
				    PF_ADDR_DYNIFTL) {
					switch (pd->af) {
#ifdef INET
					case AF_INET:
						if (r->rpool.cur->addr.p.dyn->
						    pfid_acnt4 < 1)
							return (NULL);
						PF_POOLMASK(naddr,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_addr4,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_mask4,
						    saddr, AF_INET);
						break;
#endif /* INET */
#ifdef INET6
					case AF_INET6:
						if (r->rpool.cur->addr.p.dyn->
						    pfid_acnt6 < 1)
							return (NULL);
						PF_POOLMASK(naddr,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_addr6,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_mask6,
						    saddr, AF_INET6);
						break;
#endif /* INET6 */
					}
				} else
					PF_POOLMASK(naddr,
					    &r->rpool.cur->addr.v.a.addr,
					    &r->rpool.cur->addr.v.a.mask,
					    saddr, pd->af);
				break;
			case PF_IN:
				if (r->src.addr.type == PF_ADDR_DYNIFTL) {
					switch (pd->af) {
#ifdef INET
					case AF_INET:
						if (r->src.addr.p.dyn->
						    pfid_acnt4 < 1)
							return (NULL);
						PF_POOLMASK(naddr,
						    &r->src.addr.p.dyn->
						    pfid_addr4,
						    &r->src.addr.p.dyn->
						    pfid_mask4,
						    daddr, AF_INET);
						break;
#endif /* INET */
#ifdef INET6
					case AF_INET6:
						if (r->src.addr.p.dyn->
						    pfid_acnt6 < 1)
							return (NULL);
						PF_POOLMASK(naddr,
						    &r->src.addr.p.dyn->
						    pfid_addr6,
						    &r->src.addr.p.dyn->
						    pfid_mask6,
						    daddr, AF_INET6);
						break;
#endif /* INET6 */
					}
				} else
					PF_POOLMASK(naddr,
					    &r->src.addr.v.a.addr,
					    &r->src.addr.v.a.mask, daddr,
					    pd->af);
				break;
			}
			break;
		case PF_RDR: {
			m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
			if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn))
				return (NULL);
			if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
			    PF_POOL_BITMASK)
				PF_POOLMASK(naddr, naddr,
				    &r->rpool.cur->addr.v.a.mask, daddr,
				    pd->af);

			if (r->rpool.proxy_port[1]) {
				u_int32_t tmp_nport;

				tmp_nport = ((ntohs(dport) -
				    ntohs(r->dst.port[0])) %
				    (r->rpool.proxy_port[1] -
				    r->rpool.proxy_port[0] + 1)) +
				    r->rpool.proxy_port[0];

				/* wrap around if necessary */
				if (tmp_nport > 65535)
					tmp_nport -= 65535;
				*nport = htons((u_int16_t)tmp_nport);
			} else if (r->rpool.proxy_port[0])
				*nport = htons(r->rpool.proxy_port[0]);
			break;
		}
		default:
			return (NULL);
		}
	}

	return (r);
}

struct netmsg_hashlookup {
	struct netmsg_base	base;
	struct inpcb		**nm_pinp;
	struct inpcbinfo	*nm_pcbinfo;
	struct pf_addr		*nm_saddr;
	struct pf_addr		*nm_daddr;
	uint16_t		nm_sport;
	uint16_t		nm_dport;
	sa_family_t		nm_af;
};
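
/*
 * Sketch of the cross-CPU lookup path used below: DragonFly hashes TCP
 * pcbs per netisr CPU, so when pf_socket_lookup() runs on a CPU that
 * does not own the matching tcbinfo it packages the lookup parameters
 * in a netmsg_hashlookup and, when PF_SOCKET_LOOKUP_DOMSG is enabled,
 * sends it to the owning netisr, which runs
 * in_pcblookup_hash_handler() and replies with the pcb.
 */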

#ifdef PF_SOCKET_LOOKUP_DOMSG
static void
in_pcblookup_hash_handler(netmsg_t msg)
{
	struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup *)msg;

	if (rmsg->nm_af == AF_INET)
		*rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo,
		    rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4,
		    rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL);
#ifdef INET6
	else
		*rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo,
		    &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6,
		    rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL);
#endif /* INET6 */
	lwkt_replymsg(&rmsg->base.lmsg, 0);
}
#endif /* PF_SOCKET_LOOKUP_DOMSG */

int
pf_socket_lookup(int direction, struct pf_pdesc *pd)
{
	struct pf_addr *saddr, *daddr;
	u_int16_t sport, dport;
	struct inpcbinfo *pi;
	struct inpcb *inp;
	struct netmsg_hashlookup *msg = NULL;
#ifdef PF_SOCKET_LOOKUP_DOMSG
	struct netmsg_hashlookup msg0;
#endif
	int pi_cpu = 0;

	if (pd == NULL)
		return (-1);
	pd->lookup.uid = UID_MAX;
	pd->lookup.gid = GID_MAX;
	pd->lookup.pid = NO_PID;
	if (direction == PF_IN) {
		saddr = pd->src;
		daddr = pd->dst;
	} else {
		saddr = pd->dst;
		daddr = pd->src;
	}
	switch (pd->proto) {
	case IPPROTO_TCP:
		if (pd->hdr.tcp == NULL)
			return (-1);
		sport = pd->hdr.tcp->th_sport;
		dport = pd->hdr.tcp->th_dport;

		pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport,
		    daddr->v4.s_addr, dport);
		pi = &tcbinfo[pi_cpu];
		/*
		 * Our netstack runs lockless on MP systems
		 * (only for TCP connections at the moment).
		 *
		 * As we are not allowed to read another CPU's tcbinfo,
		 * we have to ask that CPU via remote call to search the
		 * table for us.
		 *
		 * Prepare a msg iff data belongs to another CPU.
		 */
		if (pi_cpu != mycpu->gd_cpuid) {
#ifdef PF_SOCKET_LOOKUP_DOMSG
			/*
			 * NOTE:
			 *
			 * The following lwkt_domsg() is dangerous and could
			 * lock up the network system, e.g.
			 *
			 * On a 2 CPU system:
			 * netisr0 domsg to netisr1 (due to lookup)
			 * netisr1 domsg to netisr0 (due to lookup)
			 *
			 * We simply return -1 here, since we are probably
			 * called before NAT, so the TCP packet should
			 * already be on the correct CPU.
			 */
			msg = &msg0;
			netmsg_init(&msg->base, NULL, &curthread->td_msgport,
			    0, in_pcblookup_hash_handler);
			msg->nm_pinp = &inp;
			msg->nm_pcbinfo = pi;
			msg->nm_saddr = saddr;
			msg->nm_sport = sport;
			msg->nm_daddr = daddr;
			msg->nm_dport = dport;
			msg->nm_af = pd->af;
#else /* !PF_SOCKET_LOOKUP_DOMSG */
			kprintf("pf_socket_lookup: tcp packet not on the "
			    "correct cpu %d, cur cpu %d\n",
			    pi_cpu, mycpuid);
			print_backtrace(-1);
			return -1;
#endif /* PF_SOCKET_LOOKUP_DOMSG */
		}
		break;
	case IPPROTO_UDP:
		if (pd->hdr.udp == NULL)
			return (-1);
		sport = pd->hdr.udp->uh_sport;
		dport = pd->hdr.udp->uh_dport;
		pi = &udbinfo;
		break;
	default:
		return (-1);
	}
	if (direction != PF_IN) {
		u_int16_t p;

		p = sport;
		sport = dport;
		dport = p;
	}
	switch (pd->af) {
#ifdef INET6
	case AF_INET6:
		/*
		 * Query other CPU, second part
		 *
		 * msg only gets initialized when:
		 * 1) packet is TCP
		 * 2) the info belongs to another CPU
		 *
		 * Use some switch/case magic to avoid code duplication.
		 */
		if (msg == NULL) {
			inp = in6_pcblookup_hash(pi, &saddr->v6, sport,
			    &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL);

			if (inp == NULL)
				return (-1);
			break;
		}
		/* FALLTHROUGH if SMP and on other CPU */
#endif /* INET6 */
	case AF_INET:
		if (msg != NULL) {
			lwkt_domsg(netisr_cpuport(pi_cpu),
			    &msg->base.lmsg, 0);
		} else {
			inp = in_pcblookup_hash(pi, saddr->v4, sport,
			    daddr->v4, dport, INPLOOKUP_WILDCARD, NULL);
		}
		if (inp == NULL)
			return (-1);
		break;

	default:
		return (-1);
	}
	pd->lookup.uid = inp->inp_socket->so_cred->cr_uid;
	pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0];
	return (1);
}

u_int8_t
pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
{
	int hlen;
	u_int8_t hdr[60];
	u_int8_t *opt, optlen;
	u_int8_t wscale = 0;

	hlen = th_off << 2;	/* hlen <= sizeof(hdr) */
	if (hlen <= sizeof(struct tcphdr))
		return (0);
	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
		return (0);
	opt = hdr + sizeof(struct tcphdr);
	hlen -= sizeof(struct tcphdr);
	while (hlen >= 3) {
		switch (*opt) {
		case TCPOPT_EOL:
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_WINDOW:
			wscale = opt[2];
			if (wscale > TCP_MAX_WINSHIFT)
				wscale = TCP_MAX_WINSHIFT;
			wscale |= PF_WSCALE_FLAG;
			/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2)
				optlen = 2;
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return (wscale);
}

u_int16_t
pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
{
	int hlen;
	u_int8_t hdr[60];
	u_int8_t *opt, optlen;
	u_int16_t mss = tcp_mssdflt;

	hlen = th_off << 2;	/* hlen <= sizeof(hdr) */
	if (hlen <= sizeof(struct tcphdr))
		return (0);
	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
		return (0);
	opt = hdr + sizeof(struct tcphdr);
	hlen -= sizeof(struct tcphdr);
	while (hlen >= TCPOLEN_MAXSEG) {
		switch (*opt) {
		case TCPOPT_EOL:
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_MAXSEG:
			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
			/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2)
				optlen = 2;
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return (mss);
}
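
/*
 * pf_calc_mss() below derives the MSS to advertise from the MTU of the
 * interface the route to the peer points at, minus IP and TCP header
 * overhead, clamped to the peer's offer and to a 64 byte floor;
 * tcp_mssdflt is used when no route is found.
 */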
u_int16_t
pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
{
#ifdef INET
	struct sockaddr_in *dst;
	struct route ro;
#endif /* INET */
#ifdef INET6
	struct sockaddr_in6 *dst6;
	struct route_in6 ro6;
#endif /* INET6 */
	struct rtentry *rt = NULL;
	int hlen = 0;
	u_int16_t mss = tcp_mssdflt;

	switch (af) {
#ifdef INET
	case AF_INET:
		hlen = sizeof(struct ip);
		bzero(&ro, sizeof(ro));
		dst = (struct sockaddr_in *)&ro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
		rt = ro.ro_rt;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		bzero(&ro6, sizeof(ro6));
		dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		rtalloc_ign((struct route *)&ro6,
		    (RTF_CLONING | RTF_PRCLONING));
		rt = ro6.ro_rt;
		break;
#endif /* INET6 */
	}

	if (rt && rt->rt_ifp) {
		mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
		mss = max(tcp_mssdflt, mss);
		RTFREE(rt);
	}
	mss = min(mss, offer);
	mss = max(mss, 64);	/* sanity - at least max opt space */
	return (mss);
}

void
pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
{
	struct pf_rule *r = s->rule.ptr;

	s->rt_kif = NULL;
	if (!r->rt || r->rt == PF_FASTROUTE)
		return;
	switch (s->key[PF_SK_WIRE]->af) {
#ifdef INET
	case AF_INET:
		pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
		    &s->nat_src_node);
		s->rt_kif = r->rpool.cur->kif;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
		    &s->nat_src_node);
		s->rt_kif = r->rpool.cur->kif;
		break;
#endif /* INET6 */
	}
}

u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
	MD5_CTX ctx;
	u_int32_t digest[4];

	if (pf_tcp_secret_init == 0) {
		karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret));
		MD5Init(&pf_tcp_secret_ctx);
		MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
		    sizeof(pf_tcp_secret));
		pf_tcp_secret_init = 1;
	}
	ctx = pf_tcp_secret_ctx;

	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
	if (pd->af == AF_INET6) {
		MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
	} else {
		MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
	}
	MD5Final((u_char *)digest, &ctx);
	pf_tcp_iss_off += 4096;
	return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off);
}
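
/*
 * Note on the ISN generator above: pf_tcp_iss() hashes a boot-time
 * random secret together with the connection's ports and addresses via
 * MD5, and pf_tcp_iss_off advances by 4096 per created state so that
 * successive states for the same 4-tuple do not reuse sequence space.
 */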

int
pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
    struct pfi_kif *kif, struct mbuf *m, int off, void *h,
    struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
    struct ifqueue *ifq, struct inpcb *inp)
{
	struct pf_rule *nr = NULL;
	struct pf_addr *saddr = pd->src, *daddr = pd->dst;
	sa_family_t af = pd->af;
	struct pf_rule *r, *a = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_src_node *nsn = NULL;
	struct tcphdr *th = pd->hdr.tcp;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_state_key *sk = NULL, *nk = NULL;
	u_short reason;
	int rewrite = 0, hdrlen = 0;
	int tag = -1, rtableid = -1;
	int asd = 0;
	int match = 0;
	int state_icmp = 0;
	u_int16_t sport = 0, dport = 0;
	u_int16_t bproto_sum = 0, bip_sum = 0;
	u_int8_t icmptype = 0, icmpcode = 0;

	if (direction == PF_IN && pf_check_congestion(ifq)) {
		REASON_SET(&reason, PFRES_CONGEST);
		return (PF_DROP);
	}

	if (inp != NULL)
		pd->lookup.done = pf_socket_lookup(direction, pd);
	else if (debug_pfugidhack) {
		DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
		pd->lookup.done = pf_socket_lookup(direction, pd);
	}

	switch (pd->proto) {
	case IPPROTO_TCP:
		sport = th->th_sport;
		dport = th->th_dport;
		hdrlen = sizeof(*th);
		break;
	case IPPROTO_UDP:
		sport = pd->hdr.udp->uh_sport;
		dport = pd->hdr.udp->uh_dport;
		hdrlen = sizeof(*pd->hdr.udp);
		break;
#ifdef INET
	case IPPROTO_ICMP:
		if (pd->af != AF_INET)
			break;
		sport = dport = pd->hdr.icmp->icmp_id;
		hdrlen = sizeof(*pd->hdr.icmp);
		icmptype = pd->hdr.icmp->icmp_type;
		icmpcode = pd->hdr.icmp->icmp_code;

		if (icmptype == ICMP_UNREACH ||
		    icmptype == ICMP_SOURCEQUENCH ||
		    icmptype == ICMP_REDIRECT ||
		    icmptype == ICMP_TIMXCEED ||
		    icmptype == ICMP_PARAMPROB)
			state_icmp++;
		break;
#endif /* INET */
#ifdef INET6
	case IPPROTO_ICMPV6:
		if (af != AF_INET6)
			break;
		sport = dport = pd->hdr.icmp6->icmp6_id;
		hdrlen = sizeof(*pd->hdr.icmp6);
		icmptype = pd->hdr.icmp6->icmp6_type;
		icmpcode = pd->hdr.icmp6->icmp6_code;

		if (icmptype == ICMP6_DST_UNREACH ||
		    icmptype == ICMP6_PACKET_TOO_BIG ||
		    icmptype == ICMP6_TIME_EXCEEDED ||
		    icmptype == ICMP6_PARAM_PROB)
			state_icmp++;
		break;
#endif /* INET6 */
	default:
		sport = dport = hdrlen = 0;
		break;
	}

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);

	/* check packet for BINAT/NAT/RDR */
	if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn,
	    &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) {
		if (nk == NULL || sk == NULL) {
			REASON_SET(&reason, PFRES_MEMORY);
			goto cleanup;
		}

		if (pd->ip_sum)
			bip_sum = *pd->ip_sum;

		m->m_flags &= ~M_HASH;
		switch (pd->proto) {
		case IPPROTO_TCP:
			bproto_sum = th->th_sum;
			pd->proto_sum = &th->th_sum;

			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
			    nk->port[pd->sidx] != sport) {
				pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
				    &th->th_sum, &nk->addr[pd->sidx],
				    nk->port[pd->sidx], 0, af);
				pd->sport = &th->th_sport;
				sport = th->th_sport;
			}

			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
			    nk->port[pd->didx] != dport) {
				pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
				    &th->th_sum, &nk->addr[pd->didx],
				    nk->port[pd->didx], 0, af);
				dport = th->th_dport;
				pd->dport = &th->th_dport;
			}
			rewrite++;
			break;
		case IPPROTO_UDP:
			bproto_sum = pd->hdr.udp->uh_sum;
			pd->proto_sum = &pd->hdr.udp->uh_sum;

			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
			    nk->port[pd->sidx] != sport) {
				pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
				    pd->ip_sum, &pd->hdr.udp->uh_sum,
				    &nk->addr[pd->sidx],
				    nk->port[pd->sidx], 1, af);
				sport = pd->hdr.udp->uh_sport;
				pd->sport = &pd->hdr.udp->uh_sport;
			}

			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
			    nk->port[pd->didx] != dport) {
				pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
				    pd->ip_sum, &pd->hdr.udp->uh_sum,
				    &nk->addr[pd->didx],
				    nk->port[pd->didx], 1, af);
				dport = pd->hdr.udp->uh_dport;
				pd->dport = &pd->hdr.udp->uh_dport;
			}
			rewrite++;
			break;
#ifdef INET
		case IPPROTO_ICMP:
			nk->port[0] = nk->port[1];
			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
				pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
				    nk->addr[pd->sidx].v4.s_addr, 0);

			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
				pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
				    nk->addr[pd->didx].v4.s_addr, 0);

			if (nk->port[1] != pd->hdr.icmp->icmp_id) {
				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
				    pd->hdr.icmp->icmp_cksum, sport,
				    nk->port[1], 0);
				pd->hdr.icmp->icmp_id = nk->port[1];
				pd->sport = &pd->hdr.icmp->icmp_id;
			}
			m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
			break;
#endif /* INET */
#ifdef INET6
		case IPPROTO_ICMPV6:
			nk->port[0] = nk->port[1];
			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
				pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
				    &nk->addr[pd->sidx], 0);

			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
				pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
				    &nk->addr[pd->didx], 0);
			rewrite++;
			break;
#endif /* INET6 */
		default:
			switch (af) {
#ifdef INET
			case AF_INET:
				if (PF_ANEQ(saddr,
				    &nk->addr[pd->sidx], AF_INET))
					pf_change_a(&saddr->v4.s_addr,
					    pd->ip_sum,
					    nk->addr[pd->sidx].v4.s_addr, 0);

				if (PF_ANEQ(daddr,
				    &nk->addr[pd->didx], AF_INET))
					pf_change_a(&daddr->v4.s_addr,
					    pd->ip_sum,
					    nk->addr[pd->didx].v4.s_addr, 0);
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (PF_ANEQ(saddr,
				    &nk->addr[pd->sidx], AF_INET6))
					PF_ACPY(saddr, &nk->addr[pd->sidx], af);

				if (PF_ANEQ(daddr,
				    &nk->addr[pd->didx], AF_INET6))
					PF_ACPY(daddr, &nk->addr[pd->didx], af);
				break;
#endif /* INET6 */
			}
			break;
		}
		if (nr->natpass)
			r = NULL;
		pd->nat_rule = nr;
	}

	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		/* icmp only. type always 0 in other cases */
		else if (r->type && r->type != icmptype + 1)
			r = TAILQ_NEXT(r, entries);
		/* icmp only. code always 0 in other cases */
		else if (r->code && r->code != icmpcode + 1)
			r = TAILQ_NEXT(r, entries);
		else if (r->tos && !(r->tos == pd->tos))
			r = TAILQ_NEXT(r, entries);
		else if (r->rule_flag & PFRULE_FRAGMENT)
			r = TAILQ_NEXT(r, entries);
		else if (pd->proto == IPPROTO_TCP &&
		    (r->flagset & th->th_flags) != r->flags)
			r = TAILQ_NEXT(r, entries);
		/* tcp/udp only. uid.op always 0 in other cases */
		else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
		    pf_socket_lookup(direction, pd), 1)) &&
		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
		    pd->lookup.uid))
			r = TAILQ_NEXT(r, entries);
		/* tcp/udp only. gid.op always 0 in other cases */
		else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
		    pf_socket_lookup(direction, pd), 1)) &&
		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
		    pd->lookup.gid))
			r = TAILQ_NEXT(r, entries);
		else if (r->prob && r->prob <= karc4random())
			r = TAILQ_NEXT(r, entries);
		else if (r->match_tag && !pf_match_tag(m, r, &tag))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY &&
		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint)))
			r = TAILQ_NEXT(r, entries);
		else {
			if (r->tag)
				tag = r->tag;
			if (r->rtableid >= 0)
				rtableid = r->rtableid;
			if (r->anchor == NULL) {
				match = 1;
				*rm = r;
				*am = a;
				*rsm = ruleset;
				if ((*rm)->quick)
					break;
				r = TAILQ_NEXT(r, entries);
			} else
				pf_step_into_anchor(&asd, &ruleset,
				    PF_RULESET_FILTER, &r, &a, &match);
		}
		if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
		    PF_RULESET_FILTER, &r, &a, &match))
			break;
	}
	r = *rm;
	a = *am;
	ruleset = *rsm;

	REASON_SET(&reason, PFRES_MATCH);

	if (r->log || (nr != NULL && nr->log)) {
		if (rewrite)
			m_copyback(m, off, hdrlen, pd->hdr.any);
		PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
		    a, ruleset, pd);
	}

	if ((r->action == PF_DROP) &&
	    ((r->rule_flag & PFRULE_RETURNRST) ||
	    (r->rule_flag & PFRULE_RETURNICMP) ||
	    (r->rule_flag & PFRULE_RETURN))) {
		/* undo NAT changes, if they have taken place */
		if (nr != NULL) {
			PF_ACPY(saddr, &sk->addr[pd->sidx], af);
			PF_ACPY(daddr, &sk->addr[pd->didx], af);
			if (pd->sport)
				*pd->sport = sk->port[pd->sidx];
			if (pd->dport)
				*pd->dport = sk->port[pd->didx];
			if (pd->proto_sum)
				*pd->proto_sum = bproto_sum;
			if (pd->ip_sum)
				*pd->ip_sum = bip_sum;
			m_copyback(m, off, hdrlen, pd->hdr.any);
		}
		if (pd->proto == IPPROTO_TCP &&
		    ((r->rule_flag & PFRULE_RETURNRST) ||
		    (r->rule_flag & PFRULE_RETURN)) &&
		    !(th->th_flags & TH_RST)) {
			u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
			int len = 0;
			struct ip *h4;
#ifdef INET6
			struct ip6_hdr *h6;
#endif
			switch (af) {
			case AF_INET:
				h4 = mtod(m, struct ip *);
				len = h4->ip_len - off;
				break;
#ifdef INET6
			case AF_INET6:
				h6 = mtod(m, struct ip6_hdr *);
				len = h6->ip6_plen - (off - sizeof(*h6));
				break;
#endif
			}

			if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
				REASON_SET(&reason, PFRES_PROTCKSUM);
			else {
				if (th->th_flags & TH_SYN)
					ack++;
				if (th->th_flags & TH_FIN)
					ack++;
				pf_send_tcp(r, af, pd->dst,
				    pd->src, th->th_dport, th->th_sport,
				    ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
				    r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
			}
		} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
		    r->return_icmp)
			pf_send_icmp(m, r->return_icmp >> 8,
			    r->return_icmp & 255, af, r);
		else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
		    r->return_icmp6)
			pf_send_icmp(m, r->return_icmp6 >> 8,
			    r->return_icmp6 & 255, af, r);
	}

	if (r->action == PF_DROP)
		goto cleanup;

	if (pf_tag_packet(m, tag, rtableid)) {
		REASON_SET(&reason, PFRES_MEMORY);
		goto cleanup;
	}
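
	/*
	 * A state entry is created below whenever the matching rule
	 * keeps state, a translation rule applied (nr != NULL), or TCP
	 * normalization was requested; pf_create_state() also sets up
	 * the synproxy handshake and returns PF_SYNPROXY_DROP in that
	 * case.
	 */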
	if (!state_icmp && (r->keep_state || nr != NULL ||
	    (pd->flags & PFDESC_TCP_NORM))) {
		int action;
		action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m,
		    off, sport, dport, &rewrite, kif, sm, tag, bproto_sum,
		    bip_sum, hdrlen);
		if (action != PF_PASS)
			return (action);
	}

	/* copy back packet headers if we performed NAT operations */
	if (rewrite)
		m_copyback(m, off, hdrlen, pd->hdr.any);

	return (PF_PASS);

cleanup:
	if (sk != NULL)
		kfree(sk, M_PFSTATEKEYPL);
	if (nk != NULL)
		kfree(nk, M_PFSTATEKEYPL);
	return (PF_DROP);
}

static __inline int
pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
    struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk,
    struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite,
    struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum,
    u_int16_t bip_sum, int hdrlen)
{
	struct pf_state *s = NULL;
	struct pf_src_node *sn = NULL;
	struct tcphdr *th = pd->hdr.tcp;
	u_int16_t mss = tcp_mssdflt;
	u_short reason;

	/* check maximums */
	if (r->max_states && (r->states_cur >= r->max_states)) {
		pf_status.lcounters[LCNT_STATES]++;
		REASON_SET(&reason, PFRES_MAXSTATES);
		return (PF_DROP);
	}
	/* src node for filter rule */
	if ((r->rule_flag & PFRULE_SRCTRACK ||
	    r->rpool.opts & PF_POOL_STICKYADDR) &&
	    pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
		REASON_SET(&reason, PFRES_SRCLIMIT);
		goto csfailed;
	}
	/* src node for translation rule */
	if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
	    pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
		REASON_SET(&reason, PFRES_SRCLIMIT);
		goto csfailed;
	}
	s = kmalloc(sizeof(struct pf_state), M_PFSTATEPL, M_NOWAIT|M_ZERO);
	if (s == NULL) {
		REASON_SET(&reason, PFRES_MEMORY);
		goto csfailed;
	}
	s->id = 0;	/* XXX Do we really need that? not in OpenBSD */
	s->creatorid = 0;
	s->rule.ptr = r;
	s->nat_rule.ptr = nr;
	s->anchor.ptr = a;
	STATE_INC_COUNTERS(s);
	if (r->allow_opts)
		s->state_flags |= PFSTATE_ALLOWOPTS;
	if (r->rule_flag & PFRULE_STATESLOPPY)
		s->state_flags |= PFSTATE_SLOPPY;
	s->log = r->log & PF_LOG_ALL;
	if (nr != NULL)
		s->log |= nr->log & PF_LOG_ALL;
	switch (pd->proto) {
	case IPPROTO_TCP:
		s->src.seqlo = ntohl(th->th_seq);
		s->src.seqhi = s->src.seqlo + pd->p_len + 1;
		if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
		    r->keep_state == PF_STATE_MODULATE) {
			/* Generate sequence number modulator */
			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
			    0)
				s->src.seqdiff = 1;
			pf_change_a(&th->th_seq, &th->th_sum,
			    htonl(s->src.seqlo + s->src.seqdiff), 0);
			*rewrite = 1;
		} else
			s->src.seqdiff = 0;
		if (th->th_flags & TH_SYN) {
			s->src.seqhi++;
			s->src.wscale = pf_get_wscale(m, off,
			    th->th_off, pd->af);
		}
		s->src.max_win = MAX(ntohs(th->th_win), 1);
		if (s->src.wscale & PF_WSCALE_MASK) {
			/* Remove scale factor from initial window */
			int win = s->src.max_win;
			win += 1 << (s->src.wscale & PF_WSCALE_MASK);
			s->src.max_win = (win - 1) >>
			    (s->src.wscale & PF_WSCALE_MASK);
		}
		if (th->th_flags & TH_FIN)
			s->src.seqhi++;
		s->dst.seqhi = 1;
		s->dst.max_win = 1;
		s->src.state = TCPS_SYN_SENT;
		s->dst.state = TCPS_CLOSED;
		s->timeout = PFTM_TCP_FIRST_PACKET;
		break;
	case IPPROTO_UDP:
		s->src.state = PFUDPS_SINGLE;
		s->dst.state = PFUDPS_NO_TRAFFIC;
		s->timeout = PFTM_UDP_FIRST_PACKET;
		break;
	case IPPROTO_ICMP:
#ifdef INET6
	case IPPROTO_ICMPV6:
#endif
		s->timeout = PFTM_ICMP_FIRST_PACKET;
		break;
	default:
		s->src.state = PFOTHERS_SINGLE;
		s->dst.state = PFOTHERS_NO_TRAFFIC;
		s->timeout = PFTM_OTHER_FIRST_PACKET;
	}

	s->creation = time_second;
	s->expire = time_second;

	if (sn != NULL) {
		s->src_node = sn;
		s->src_node->states++;
	}
	if (nsn != NULL) {
		/* XXX We only modify one side for now. */
		PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
		s->nat_src_node = nsn;
		s->nat_src_node->states++;
	}
	if (pd->proto == IPPROTO_TCP) {
		if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
		    off, pd, th, &s->src, &s->dst)) {
			REASON_SET(&reason, PFRES_MEMORY);
			pf_src_tree_remove_state(s);
			STATE_DEC_COUNTERS(s);
			kfree(s, M_PFSTATEPL);
			return (PF_DROP);
		}
		if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
		    pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
		    &s->src, &s->dst, rewrite)) {
			/* This really shouldn't happen!!! */
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_normalize_tcp_stateful failed on first pkt"));
			pf_normalize_tcp_cleanup(s);
			pf_src_tree_remove_state(s);
			STATE_DEC_COUNTERS(s);
			kfree(s, M_PFSTATEPL);
			return (PF_DROP);
		}
	}
	s->direction = pd->dir;

	if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk,
	    pd->src, pd->dst, sport, dport))
		goto csfailed;

	if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) {
		if (pd->proto == IPPROTO_TCP)
			pf_normalize_tcp_cleanup(s);
		REASON_SET(&reason, PFRES_STATEINS);
		pf_src_tree_remove_state(s);
		STATE_DEC_COUNTERS(s);
		kfree(s, M_PFSTATEPL);
		return (PF_DROP);
	} else
		*sm = s;

	pf_set_rt_ifp(s, pd->src);	/* needs s->state_key set */
	if (tag > 0) {
		pf_tag_ref(tag);
		s->tag = tag;
	}
	if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
	    TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
		s->src.state = PF_TCPS_PROXY_SRC;
		/* undo NAT changes, if they have taken place */
		if (nr != NULL) {
			struct pf_state_key *skt = s->key[PF_SK_WIRE];
			if (pd->dir == PF_OUT)
				skt = s->key[PF_SK_STACK];
			PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
			PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
			if (pd->sport)
				*pd->sport = skt->port[pd->sidx];
			if (pd->dport)
				*pd->dport = skt->port[pd->didx];
			if (pd->proto_sum)
				*pd->proto_sum = bproto_sum;
			if (pd->ip_sum)
				*pd->ip_sum = bip_sum;
			m_copyback(m, off, hdrlen, pd->hdr.any);
		}
		s->src.seqhi = htonl(karc4random());
		/* Find mss option */
		mss = pf_get_mss(m, off, th->th_off, pd->af);
		mss = pf_calc_mss(pd->src, pd->af, mss);
		mss = pf_calc_mss(pd->dst, pd->af, mss);
		s->src.mss = mss;
		pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
		    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
		    TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
		REASON_SET(&reason, PFRES_SYNPROXY);
		return (PF_SYNPROXY_DROP);
	}

	return (PF_PASS);

csfailed:
	if (sk != NULL)
		kfree(sk, M_PFSTATEKEYPL);
	if (nk != NULL)
		kfree(nk, M_PFSTATEKEYPL);

	if (sn != NULL && sn->states == 0 && sn->expire == 0) {
		RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
		pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
		pf_status.src_nodes--;
		kfree(sn, M_PFSRCTREEPL);
	}
	if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) {
		RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
		pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
		pf_status.src_nodes--;
		kfree(nsn, M_PFSRCTREEPL);
	}
	return (PF_DROP);
}
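
/*
 * pf_test_fragment() below evaluates non-initial IP fragments: they
 * carry no transport header, so rules relying on ports, TCP flags,
 * ICMP type/code or OS fingerprints can never match them and are
 * skipped outright.
 */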
int
pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
    struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
    struct pf_ruleset **rsm)
{
	struct pf_rule *r, *a = NULL;
	struct pf_ruleset *ruleset = NULL;
	sa_family_t af = pd->af;
	u_short reason;
	int tag = -1;
	int asd = 0;
	int match = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->tos && !(r->tos == pd->tos))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY)
			r = TAILQ_NEXT(r, entries);
		else if (pd->proto == IPPROTO_UDP &&
		    (r->src.port_op || r->dst.port_op))
			r = TAILQ_NEXT(r, entries);
		else if (pd->proto == IPPROTO_TCP &&
		    (r->src.port_op || r->dst.port_op || r->flagset))
			r = TAILQ_NEXT(r, entries);
		else if ((pd->proto == IPPROTO_ICMP ||
		    pd->proto == IPPROTO_ICMPV6) &&
		    (r->type || r->code))
			r = TAILQ_NEXT(r, entries);
		else if (r->prob && r->prob <= karc4random())
			r = TAILQ_NEXT(r, entries);
		else if (r->match_tag && !pf_match_tag(m, r, &tag))
			r = TAILQ_NEXT(r, entries);
		else {
			if (r->anchor == NULL) {
				match = 1;
				*rm = r;
				*am = a;
				*rsm = ruleset;
				if ((*rm)->quick)
					break;
				r = TAILQ_NEXT(r, entries);
			} else
				pf_step_into_anchor(&asd, &ruleset,
				    PF_RULESET_FILTER, &r, &a, &match);
		}
		if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
		    PF_RULESET_FILTER, &r, &a, &match))
			break;
	}
	r = *rm;
	a = *am;
	ruleset = *rsm;

	REASON_SET(&reason, PFRES_MATCH);

	if (r->log)
		PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
		    pd);

	if (r->action != PF_PASS)
		return (PF_DROP);

	if (pf_tag_packet(m, tag, -1)) {
		REASON_SET(&reason, PFRES_MEMORY);
		return (PF_DROP);
	}

	return (PF_PASS);
}

int
pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
    struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
    struct pf_pdesc *pd, u_short *reason, int *copyback)
{
	struct tcphdr *th = pd->hdr.tcp;
	u_int16_t win = ntohs(th->th_win);
	u_int32_t ack, end, seq, orig_seq;
	u_int8_t sws, dws;
	int ackskew;

	if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
		sws = src->wscale & PF_WSCALE_MASK;
		dws = dst->wscale & PF_WSCALE_MASK;
	} else
		sws = dws = 0;

	/*
	 * Sequence tracking algorithm from Guido van Rooij's paper:
	 * http://www.madison-gurkha.com/publications/tcp_filtering/
	 * tcp_filtering.ps
	 */

	orig_seq = seq = ntohl(th->th_seq);
	if (src->seqlo == 0) {
		/* First packet from this end.  Set its state */

		if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
		    src->scrub == NULL) {
			if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
				REASON_SET(reason, PFRES_MEMORY);
				return (PF_DROP);
			}
		}

		/* Deferred generation of sequence number modulator */
		if (dst->seqdiff && !src->seqdiff) {
			/* use random iss for the TCP server */
			while ((src->seqdiff = karc4random() - seq) == 0)
				;
			ack = ntohl(th->th_ack) - dst->seqdiff;
			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
			    src->seqdiff), 0);
			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
			*copyback = 1;
		} else {
			ack = ntohl(th->th_ack);
		}

		end = seq + pd->p_len;
		if (th->th_flags & TH_SYN) {
			end++;
			(*state)->sync_flags |= PFSTATE_GOT_SYN2;
			if (dst->wscale & PF_WSCALE_FLAG) {
				src->wscale = pf_get_wscale(m, off, th->th_off,
				    pd->af);
				if (src->wscale & PF_WSCALE_FLAG) {
					/*
					 * Remove scale factor from initial
					 * window
					 */
					sws = src->wscale & PF_WSCALE_MASK;
					win = ((u_int32_t)win + (1 << sws) - 1)
					    >> sws;
					dws = dst->wscale & PF_WSCALE_MASK;
				} else {
					/* fixup other window */
					dst->max_win <<= dst->wscale &
					    PF_WSCALE_MASK;
					/* in case of a retrans SYN|ACK */
					dst->wscale = 0;
				}
			}
		}
		if (th->th_flags & TH_FIN)
			end++;

		src->seqlo = seq;
		if (src->state < TCPS_SYN_SENT)
			src->state = TCPS_SYN_SENT;

		/*
		 * May need to slide the window (seqhi may have been set by
		 * the crappy stack check or if we picked up the connection
		 * after establishment)
		 */
		if (src->seqhi == 1 ||
		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
			src->seqhi = end + MAX(1, dst->max_win << dws);
		if (win > src->max_win)
			src->max_win = win;

	} else {
		ack = ntohl(th->th_ack) - dst->seqdiff;
		if (src->seqdiff) {
			/* Modulate sequence numbers */
			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
			    src->seqdiff), 0);
			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
			*copyback = 1;
		}
		end = seq + pd->p_len;
		if (th->th_flags & TH_SYN)
			end++;
		if (th->th_flags & TH_FIN)
			end++;
	}

	if ((th->th_flags & TH_ACK) == 0) {
		/* Let it pass through the ack skew check */
		ack = dst->seqlo;
	} else if ((ack == 0 &&
	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
	    /* broken tcp stacks do not set ack */
	    (dst->state < TCPS_SYN_SENT)) {
		/*
		 * Many stacks (ours included) will set the ACK number in an
		 * FIN|ACK if the SYN times out -- no sequence to ACK.
		 */
		ack = dst->seqlo;
	}

	if (seq == end) {
		/* Ease sequencing restrictions on no data packets */
		seq = src->seqlo;
		end = seq;
	}

	ackskew = dst->seqlo - ack;
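
	/*
	 * ackskew is the signed distance between the highest sequence
	 * number recorded for the peer (dst->seqlo) and the ACK in this
	 * segment: positive values mean old data is being acknowledged,
	 * negative values mean the segment ACKs data we have not seen
	 * yet.  The MAXACKWINDOW tests below bound it in both directions.
	 */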

	/*
	 * Need to demodulate the sequence numbers in any TCP SACK options
	 * (Selective ACK). We could optionally validate the SACK values
	 * against the current ACK window, either forwards or backwards, but
	 * I'm not confident that SACK has been implemented properly
	 * everywhere. It wouldn't surprise me if several stacks accidentally
	 * SACK too far backwards of previously ACKed data. There really aren't
	 * any security implications of bad SACKing unless the target stack
	 * doesn't validate the option length correctly. Someone trying to
	 * spoof into a TCP connection won't bother blindly sending SACK
	 * options anyway.
	 */
	if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
		if (pf_modulate_sack(m, off, pd, th, dst))
			*copyback = 1;
	}

#define MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
	if (SEQ_GEQ(src->seqhi, end) &&
	    /* Last octet inside other's window space */
	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
	    /* Retrans: not more than one window back */
	    (ackskew >= -MAXACKWINDOW) &&
	    /* Acking not more than one reassembled fragment backwards */
	    (ackskew <= (MAXACKWINDOW << sws)) &&
	    /* Acking not more than one window forward */
	    ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
	    (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
	    (pd->flags & PFDESC_IP_REAS) == 0)) {
		/* Require an exact/+1 sequence match on resets when possible */

		if (dst->scrub || src->scrub) {
			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
			    *state, src, dst, copyback))
				return (PF_DROP);
		}

		/* update max window */
		if (src->max_win < win)
			src->max_win = win;
		/* synchronize sequencing */
		if (SEQ_GT(end, src->seqlo))
			src->seqlo = end;
		/* slide the window of what the other end can send */
		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
			dst->seqhi = ack + MAX((win << sws), 1);

		/* update states */
		if (th->th_flags & TH_SYN)
			if (src->state < TCPS_SYN_SENT)
				src->state = TCPS_SYN_SENT;
		if (th->th_flags & TH_FIN)
			if (src->state < TCPS_CLOSING)
				src->state = TCPS_CLOSING;
		if (th->th_flags & TH_ACK) {
			if (dst->state == TCPS_SYN_SENT) {
				dst->state = TCPS_ESTABLISHED;
				if (src->state == TCPS_ESTABLISHED &&
				    (*state)->src_node != NULL &&
				    pf_src_connlimit(state)) {
					REASON_SET(reason, PFRES_SRCLIMIT);
					return (PF_DROP);
				}
			} else if (dst->state == TCPS_CLOSING)
				dst->state = TCPS_FIN_WAIT_2;
		}
		if (th->th_flags & TH_RST)
			src->state = dst->state = TCPS_TIME_WAIT;

		/* update expire time */
		(*state)->expire = time_second;
		if (src->state >= TCPS_FIN_WAIT_2 &&
		    dst->state >= TCPS_FIN_WAIT_2)
			(*state)->timeout = PFTM_TCP_CLOSED;
		else if (src->state >= TCPS_CLOSING &&
		    dst->state >= TCPS_CLOSING)
			(*state)->timeout = PFTM_TCP_FIN_WAIT;
		else if (src->state < TCPS_ESTABLISHED ||
		    dst->state < TCPS_ESTABLISHED)
			(*state)->timeout = PFTM_TCP_OPENING;
		else if (src->state >= TCPS_CLOSING ||
		    dst->state >= TCPS_CLOSING)
			(*state)->timeout = PFTM_TCP_CLOSING;
		else
			(*state)->timeout = PFTM_TCP_ESTABLISHED;

		/* Fall through to PASS packet */

	} else if ((dst->state < TCPS_SYN_SENT ||
	    dst->state >= TCPS_FIN_WAIT_2 ||
	    src->state >= TCPS_FIN_WAIT_2) &&
	    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
	    /* Within a window forward of the originating packet */
	    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
		/* Within a window backward of the originating packet */
	if (SEQ_GEQ(src->seqhi, end) &&
	    /* Last octet inside other's window space */
	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
	    /* Retrans: not more than one window back */
	    (ackskew >= -MAXACKWINDOW) &&
	    /* Acking not more than one reassembled fragment backwards */
	    (ackskew <= (MAXACKWINDOW << sws)) &&
	    /* Acking not more than one window forward */
	    ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
	    (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
	    (pd->flags & PFDESC_IP_REAS) == 0)) {
		/* Require an exact/+1 sequence match on resets when possible */

		if (dst->scrub || src->scrub) {
			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
			    *state, src, dst, copyback))
				return (PF_DROP);
		}

		/* update max window */
		if (src->max_win < win)
			src->max_win = win;
		/* synchronize sequencing */
		if (SEQ_GT(end, src->seqlo))
			src->seqlo = end;
		/* slide the window of what the other end can send */
		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
			dst->seqhi = ack + MAX((win << sws), 1);

		/* update states */
		if (th->th_flags & TH_SYN)
			if (src->state < TCPS_SYN_SENT)
				src->state = TCPS_SYN_SENT;
		if (th->th_flags & TH_FIN)
			if (src->state < TCPS_CLOSING)
				src->state = TCPS_CLOSING;
		if (th->th_flags & TH_ACK) {
			if (dst->state == TCPS_SYN_SENT) {
				dst->state = TCPS_ESTABLISHED;
				if (src->state == TCPS_ESTABLISHED &&
				    (*state)->src_node != NULL &&
				    pf_src_connlimit(state)) {
					REASON_SET(reason, PFRES_SRCLIMIT);
					return (PF_DROP);
				}
			} else if (dst->state == TCPS_CLOSING)
				dst->state = TCPS_FIN_WAIT_2;
		}
		if (th->th_flags & TH_RST)
			src->state = dst->state = TCPS_TIME_WAIT;

		/* update expire time */
		(*state)->expire = time_second;
		if (src->state >= TCPS_FIN_WAIT_2 &&
		    dst->state >= TCPS_FIN_WAIT_2)
			(*state)->timeout = PFTM_TCP_CLOSED;
		else if (src->state >= TCPS_CLOSING &&
		    dst->state >= TCPS_CLOSING)
			(*state)->timeout = PFTM_TCP_FIN_WAIT;
		else if (src->state < TCPS_ESTABLISHED ||
		    dst->state < TCPS_ESTABLISHED)
			(*state)->timeout = PFTM_TCP_OPENING;
		else if (src->state >= TCPS_CLOSING ||
		    dst->state >= TCPS_CLOSING)
			(*state)->timeout = PFTM_TCP_CLOSING;
		else
			(*state)->timeout = PFTM_TCP_ESTABLISHED;

		/* Fall through to PASS packet */

	} else if ((dst->state < TCPS_SYN_SENT ||
	    dst->state >= TCPS_FIN_WAIT_2 ||
	    src->state >= TCPS_FIN_WAIT_2) &&
	    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
	    /* Within a window forward of the originating packet */
	    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
		/* Within a window backward of the originating packet */

		/*
		 * This currently handles three situations:
		 *  1) Stupid stacks will shotgun SYNs before their peer
		 *     replies.
		 *  2) When PF catches an already established stream (the
		 *     firewall rebooted, the state table was flushed, routes
		 *     changed...)
		 *  3) Packets get funky immediately after the connection
		 *     closes (this should catch Solaris spurious ACK|FINs
		 *     that web servers like to spew after a close)
		 *
		 * This must be a little more careful than the above code
		 * since packet floods will also be caught here. We don't
		 * update the TTL here to mitigate the damage of a packet
		 * flood and so the same code can handle awkward establishment
		 * and a loosened connection close.
		 * In the establishment case, a correct peer response will
		 * validate the connection, go through the normal state code
		 * and keep updating the state TTL.
		 */

		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf: loose state match: ");
			pf_print_state(*state);
			pf_print_flags(th->th_flags);
			kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
			    "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
			    pd->p_len, ackskew,
			    (unsigned long long)(*state)->packets[0],
			    (unsigned long long)(*state)->packets[1],
			    pd->dir == PF_IN ? "in" : "out",
			    pd->dir == (*state)->direction ? "fwd" : "rev");
		}

		if (dst->scrub || src->scrub) {
			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
			    *state, src, dst, copyback))
				return (PF_DROP);
		}

		/* update max window */
		if (src->max_win < win)
			src->max_win = win;
		/* synchronize sequencing */
		if (SEQ_GT(end, src->seqlo))
			src->seqlo = end;
		/* slide the window of what the other end can send */
		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
			dst->seqhi = ack + MAX((win << sws), 1);

		/*
		 * Cannot set dst->seqhi here since this could be a shotgunned
		 * SYN and not an already established connection.
		 */

		if (th->th_flags & TH_FIN)
			if (src->state < TCPS_CLOSING)
				src->state = TCPS_CLOSING;
		if (th->th_flags & TH_RST)
			src->state = dst->state = TCPS_TIME_WAIT;

		/* Fall through to PASS packet */

	} else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY ||
	    ((*state)->pickup_mode == PF_PICKUPS_ENABLED &&
	    ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) !=
	    PFSTATE_GOT_SYN_MASK)) {
		/*
		 * If pickup mode is hash only, do not fail on sequence checks.
		 *
		 * If pickup mode is enabled and we did not see the SYN in
		 * both directions, do not fail on sequence checks because
		 * we do not have complete information on window scale.
		 *
		 * Adjust expiration and fall through to PASS packet.
		 * XXX Add a FIN check to reduce timeout?
		 */
		(*state)->expire = time_second;
	} else {
		/*
		 * Failure processing
		 */
		if ((*state)->dst.state == TCPS_SYN_SENT &&
		    (*state)->src.state == TCPS_SYN_SENT) {
			/* Send RST for state mismatches during handshake */
			if (!(th->th_flags & TH_RST))
				pf_send_tcp((*state)->rule.ptr, pd->af,
				    pd->dst, pd->src, th->th_dport,
				    th->th_sport, ntohl(th->th_ack), 0,
				    TH_RST, 0, 0,
				    (*state)->rule.ptr->return_ttl, 1, 0,
				    pd->eh, kif->pfik_ifp);
			src->seqlo = 0;
			src->seqhi = 1;
			src->max_win = 1;
		} else if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf: BAD state: ");
			pf_print_state(*state);
			pf_print_flags(th->th_flags);
			kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
			    "pkts=%llu:%llu dir=%s,%s\n",
			    seq, orig_seq, ack, pd->p_len, ackskew,
			    (unsigned long long)(*state)->packets[0],
			    (unsigned long long)(*state)->packets[1],
			    pd->dir == PF_IN ? "in" : "out",
			    pd->dir == (*state)->direction ? "fwd" : "rev");
			kprintf("pf: State failure on: %c %c %c %c | %c %c\n",
			    SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
			    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
			    ' ' : '2',
			    (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
			    (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
			    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?
			    ' ' : '5',
			    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?
			    ' ' : '6');
		}
		REASON_SET(reason, PFRES_BADSTATE);
		return (PF_DROP);
	}

	return (PF_PASS);
}

int
pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
	struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
{
	struct tcphdr *th = pd->hdr.tcp;

	if (th->th_flags & TH_SYN)
		if (src->state < TCPS_SYN_SENT)
			src->state = TCPS_SYN_SENT;
	if (th->th_flags & TH_FIN)
		if (src->state < TCPS_CLOSING)
			src->state = TCPS_CLOSING;
	if (th->th_flags & TH_ACK) {
		if (dst->state == TCPS_SYN_SENT) {
			dst->state = TCPS_ESTABLISHED;
			if (src->state == TCPS_ESTABLISHED &&
			    (*state)->src_node != NULL &&
			    pf_src_connlimit(state)) {
				REASON_SET(reason, PFRES_SRCLIMIT);
				return (PF_DROP);
			}
		} else if (dst->state == TCPS_CLOSING) {
			dst->state = TCPS_FIN_WAIT_2;
		} else if (src->state == TCPS_SYN_SENT &&
		    dst->state < TCPS_SYN_SENT) {
			/*
			 * Handle a special sloppy case where we only see one
			 * half of the connection. If there is an ACK after
			 * the initial SYN without ever seeing a packet from
			 * the destination, set the connection to established.
			 */
			dst->state = src->state = TCPS_ESTABLISHED;
			if ((*state)->src_node != NULL &&
			    pf_src_connlimit(state)) {
				REASON_SET(reason, PFRES_SRCLIMIT);
				return (PF_DROP);
			}
		} else if (src->state == TCPS_CLOSING &&
		    dst->state == TCPS_ESTABLISHED &&
		    dst->seqlo == 0) {
			/*
			 * Handle the closing of half connections where we
			 * don't see the full bidirectional FIN/ACK+ACK
			 * handshake.
			 */
			dst->state = TCPS_CLOSING;
		}
	}
	if (th->th_flags & TH_RST)
		src->state = dst->state = TCPS_TIME_WAIT;

	/* update expire time */
	(*state)->expire = time_second;
	if (src->state >= TCPS_FIN_WAIT_2 &&
	    dst->state >= TCPS_FIN_WAIT_2)
		(*state)->timeout = PFTM_TCP_CLOSED;
	else if (src->state >= TCPS_CLOSING &&
	    dst->state >= TCPS_CLOSING)
		(*state)->timeout = PFTM_TCP_FIN_WAIT;
	else if (src->state < TCPS_ESTABLISHED ||
	    dst->state < TCPS_ESTABLISHED)
		(*state)->timeout = PFTM_TCP_OPENING;
	else if (src->state >= TCPS_CLOSING ||
	    dst->state >= TCPS_CLOSING)
		(*state)->timeout = PFTM_TCP_CLOSING;
	else
		(*state)->timeout = PFTM_TCP_ESTABLISHED;

	return (PF_PASS);
}

int
pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
	struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
	u_short *reason)
{
	struct pf_state_key_cmp key;
	struct tcphdr *th = pd->hdr.tcp;
	int copyback = 0;
	struct pf_state_peer *src, *dst;
	struct pf_state_key *sk;

	key.af = pd->af;
	key.proto = IPPROTO_TCP;
	if (direction == PF_IN) {	/* wire side, straight */
		PF_ACPY(&key.addr[0], pd->src, key.af);
		PF_ACPY(&key.addr[1], pd->dst, key.af);
		key.port[0] = th->th_sport;
		key.port[1] = th->th_dport;
	} else {			/* stack side, reverse */
		PF_ACPY(&key.addr[1], pd->src, key.af);
		PF_ACPY(&key.addr[0], pd->dst, key.af);
		key.port[1] = th->th_sport;
		key.port[0] = th->th_dport;
	}

	STATE_LOOKUP(kif, &key, direction, *state, m);

	if (direction == (*state)->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	sk = (*state)->key[pd->didx];

	if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
		if (direction != (*state)->direction) {
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		}
		if (th->th_flags & TH_SYN) {
			if (ntohl(th->th_seq) != (*state)->src.seqlo) {
				REASON_SET(reason, PFRES_SYNPROXY);
				return (PF_DROP);
			}
			pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
			    pd->src, th->th_dport, th->th_sport,
			    (*state)->src.seqhi, ntohl(th->th_seq) + 1,
			    TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
			    0, NULL, NULL);
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		} else if (!(th->th_flags & TH_ACK) ||
		    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
		    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_DROP);
		} else if ((*state)->src_node != NULL &&
		    pf_src_connlimit(state)) {
			REASON_SET(reason, PFRES_SRCLIMIT);
			return (PF_DROP);
		} else
			(*state)->src.state = PF_TCPS_PROXY_DST;
	}
	if ((*state)->src.state == PF_TCPS_PROXY_DST) {
		if (direction == (*state)->direction) {
			if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
			    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
			    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
				REASON_SET(reason, PFRES_SYNPROXY);
				return (PF_DROP);
			}
			(*state)->src.max_win = MAX(ntohs(th->th_win), 1);
			if ((*state)->dst.seqhi == 1)
				(*state)->dst.seqhi = htonl(karc4random());
			pf_send_tcp((*state)->rule.ptr, pd->af,
			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
			    sk->port[pd->sidx], sk->port[pd->didx],
			    (*state)->dst.seqhi, 0, TH_SYN, 0,
			    (*state)->src.mss, 0, 0, (*state)->tag,
			    NULL, NULL);
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
		    (TH_SYN|TH_ACK)) ||
		    (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_DROP);
		} else {
			(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
			(*state)->dst.seqlo = ntohl(th->th_seq);
			pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
			    pd->src, th->th_dport, th->th_sport,
			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
			    TH_ACK, (*state)->src.max_win, 0, 0, 0,
			    (*state)->tag, NULL, NULL);
			pf_send_tcp((*state)->rule.ptr, pd->af,
			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
			    sk->port[pd->sidx], sk->port[pd->didx],
			    (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
			    TH_ACK, (*state)->dst.max_win, 0, 0, 1,
			    0, NULL, NULL);
			(*state)->src.seqdiff = (*state)->dst.seqhi -
			    (*state)->src.seqlo;
			(*state)->dst.seqdiff = (*state)->src.seqhi -
			    (*state)->dst.seqlo;
			(*state)->src.seqhi = (*state)->src.seqlo +
			    (*state)->dst.max_win;
			(*state)->dst.seqhi = (*state)->dst.seqlo +
			    (*state)->src.max_win;
			(*state)->src.wscale = (*state)->dst.wscale = 0;
			(*state)->src.state = (*state)->dst.state =
			    TCPS_ESTABLISHED;
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		}
	}

	if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
	    dst->state >= TCPS_FIN_WAIT_2 &&
	    src->state >= TCPS_FIN_WAIT_2) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf: state reuse ");
			pf_print_state(*state);
			pf_print_flags(th->th_flags);
			kprintf("\n");
		}
		/* XXX make sure it's the same direction ?? */
		(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
		pf_unlink_state(*state);
		*state = NULL;
		return (PF_DROP);
	}

	if ((*state)->state_flags & PFSTATE_SLOPPY) {
		if (pf_tcp_track_sloppy(src, dst, state, pd, reason) ==
		    PF_DROP)
			return (PF_DROP);
	} else {
		if (pf_tcp_track_full(src, dst, state, kif, m, off, pd,
		    reason, &copyback) == PF_DROP)
			return (PF_DROP);
	}

	/* translate source/destination address, if necessary */
	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
		struct pf_state_key *nk = (*state)->key[pd->didx];

		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
		    nk->port[pd->sidx] != th->th_sport) {
			/*
			 * The translated source address may be completely
			 * unrelated to the saved link header, make sure
			 * a bridge doesn't try to use it.
			 */
			m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
			m->m_flags &= ~M_HASH;
			pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
			    &th->th_sum, &nk->addr[pd->sidx],
			    nk->port[pd->sidx], 0, pd->af);
		}

		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
		    nk->port[pd->didx] != th->th_dport) {
			/*
			 * If we don't redispatch the packet will go into
			 * the protocol stack on the wrong cpu for the
			 * post-translated address.
			 */
			m->m_flags &= ~M_HASH;
			pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
			    &th->th_sum, &nk->addr[pd->didx],
			    nk->port[pd->didx], 0, pd->af);
		}
		copyback = 1;
	}

	/* Copyback sequence modulation or stateful scrub changes if needed */
	if (copyback)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);
}

int
pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
	struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
{
	struct pf_state_peer *src, *dst;
	struct pf_state_key_cmp key;
	struct udphdr *uh = pd->hdr.udp;

	key.af = pd->af;
	key.proto = IPPROTO_UDP;
	if (direction == PF_IN) {	/* wire side, straight */
		PF_ACPY(&key.addr[0], pd->src, key.af);
		PF_ACPY(&key.addr[1], pd->dst, key.af);
		key.port[0] = uh->uh_sport;
		key.port[1] = uh->uh_dport;
	} else {			/* stack side, reverse */
		PF_ACPY(&key.addr[1], pd->src, key.af);
		PF_ACPY(&key.addr[0], pd->dst, key.af);
		key.port[1] = uh->uh_sport;
		key.port[0] = uh->uh_dport;
	}

	STATE_LOOKUP(kif, &key, direction, *state, m);

	if (direction == (*state)->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	if (src->state < PFUDPS_SINGLE)
		src->state = PFUDPS_SINGLE;
	if (dst->state == PFUDPS_SINGLE)
		dst->state = PFUDPS_MULTIPLE;

	/* update expire time */
	(*state)->expire = time_second;
	if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
		(*state)->timeout = PFTM_UDP_MULTIPLE;
	else
		(*state)->timeout = PFTM_UDP_SINGLE;

	/* translate source/destination address, if necessary */
	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
		struct pf_state_key *nk = (*state)->key[pd->didx];

		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
		    nk->port[pd->sidx] != uh->uh_sport) {
			/*
			 * The translated source address may be completely
			 * unrelated to the saved link header, make sure
			 * a bridge doesn't try to use it.
			 */
			m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
			m->m_flags &= ~M_HASH;
			pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
			    &uh->uh_sum, &nk->addr[pd->sidx],
			    nk->port[pd->sidx], 1, pd->af);
		}

		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
		    nk->port[pd->didx] != uh->uh_dport) {
			/*
			 * If we don't redispatch the packet will go into
			 * the protocol stack on the wrong cpu for the
			 * post-translated address.
			 */
			m->m_flags &= ~M_HASH;
			pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
			    &uh->uh_sum, &nk->addr[pd->didx],
			    nk->port[pd->didx], 1, pd->af);
		}
		m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
	}

	return (PF_PASS);
}

int
pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
	struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
{
	struct pf_addr *saddr = pd->src, *daddr = pd->dst;
	u_int16_t icmpid = 0, *icmpsum;
	u_int8_t icmptype;
	int state_icmp = 0;
	struct pf_state_key_cmp key;

	switch (pd->proto) {
#ifdef INET
	case IPPROTO_ICMP:
		icmptype = pd->hdr.icmp->icmp_type;
		icmpid = pd->hdr.icmp->icmp_id;
		icmpsum = &pd->hdr.icmp->icmp_cksum;

		if (icmptype == ICMP_UNREACH ||
		    icmptype == ICMP_SOURCEQUENCH ||
		    icmptype == ICMP_REDIRECT ||
		    icmptype == ICMP_TIMXCEED ||
		    icmptype == ICMP_PARAMPROB)
			state_icmp++;
		break;
#endif /* INET */
#ifdef INET6
	case IPPROTO_ICMPV6:
		icmptype = pd->hdr.icmp6->icmp6_type;
		icmpid = pd->hdr.icmp6->icmp6_id;
		icmpsum = &pd->hdr.icmp6->icmp6_cksum;

		if (icmptype == ICMP6_DST_UNREACH ||
		    icmptype == ICMP6_PACKET_TOO_BIG ||
		    icmptype == ICMP6_TIME_EXCEEDED ||
		    icmptype == ICMP6_PARAM_PROB)
			state_icmp++;
		break;
#endif /* INET6 */
	}
	if (!state_icmp) {
		/*
		 * ICMP query/reply message not related to a TCP/UDP packet.
		 * Search for an ICMP state.
		 */
		key.af = pd->af;
		key.proto = pd->proto;
		key.port[0] = key.port[1] = icmpid;
		if (direction == PF_IN) {	/* wire side, straight */
			PF_ACPY(&key.addr[0], pd->src, key.af);
			PF_ACPY(&key.addr[1], pd->dst, key.af);
		} else {			/* stack side, reverse */
			PF_ACPY(&key.addr[1], pd->src, key.af);
			PF_ACPY(&key.addr[0], pd->dst, key.af);
		}

		STATE_LOOKUP(kif, &key, direction, *state, m);

		(*state)->expire = time_second;
		(*state)->timeout = PFTM_ICMP_ERROR_REPLY;

		/* translate source/destination address, if necessary */
		if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
			struct pf_state_key *nk = (*state)->key[pd->didx];

			switch (pd->af) {
#ifdef INET
			case AF_INET:
				if (PF_ANEQ(pd->src,
				    &nk->addr[pd->sidx], AF_INET))
					pf_change_a(&saddr->v4.s_addr,
					    pd->ip_sum,
					    nk->addr[pd->sidx].v4.s_addr, 0);

				if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
				    AF_INET))
					pf_change_a(&daddr->v4.s_addr,
					    pd->ip_sum,
					    nk->addr[pd->didx].v4.s_addr, 0);

				if (nk->port[0] !=
				    pd->hdr.icmp->icmp_id) {
					pd->hdr.icmp->icmp_cksum =
					    pf_cksum_fixup(
					    pd->hdr.icmp->icmp_cksum, icmpid,
					    nk->port[pd->sidx], 0);
					pd->hdr.icmp->icmp_id =
					    nk->port[pd->sidx];
				}

				m_copyback(m, off, ICMP_MINLEN,
				    (caddr_t)pd->hdr.icmp);
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (PF_ANEQ(pd->src,
				    &nk->addr[pd->sidx], AF_INET6))
					pf_change_a6(saddr,
					    &pd->hdr.icmp6->icmp6_cksum,
					    &nk->addr[pd->sidx], 0);

				if (PF_ANEQ(pd->dst,
				    &nk->addr[pd->didx], AF_INET6))
					pf_change_a6(daddr,
					    &pd->hdr.icmp6->icmp6_cksum,
					    &nk->addr[pd->didx], 0);

				m_copyback(m, off,
				    sizeof(struct icmp6_hdr),
				    (caddr_t)pd->hdr.icmp6);
				break;
#endif /* INET6 */
			}
		}
		return (PF_PASS);
	} else {
		/*
		 * ICMP error message in response to a TCP/UDP packet.
		 * Extract the inner TCP/UDP header and search for that state.
		 */
		struct pf_pdesc pd2;
#ifdef INET
		struct ip h2;
#endif /* INET */
#ifdef INET6
		struct ip6_hdr h2_6;
		int terminal = 0;
#endif /* INET6 */
		int ipoff2;
		int off2;

		pd2.af = pd->af;
		/* Payload packet is from the opposite direction. */
		pd2.sidx = (direction == PF_IN) ? 1 : 0;
		pd2.didx = (direction == PF_IN) ? 0 : 1;
		switch (pd->af) {
#ifdef INET
		case AF_INET:
			/* offset of h2 in mbuf chain */
			ipoff2 = off + ICMP_MINLEN;

			if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(ip)\n"));
				return (PF_DROP);
			}
			/*
			 * ICMP error messages don't refer to non-first
			 * fragments
			 */
			if (h2.ip_off & htons(IP_OFFMASK)) {
				REASON_SET(reason, PFRES_FRAG);
				return (PF_DROP);
			}

			/* offset of protocol header that follows h2 */
			off2 = ipoff2 + (h2.ip_hl << 2);

			pd2.proto = h2.ip_p;
			pd2.src = (struct pf_addr *)&h2.ip_src;
			pd2.dst = (struct pf_addr *)&h2.ip_dst;
			pd2.ip_sum = &h2.ip_sum;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			ipoff2 = off + sizeof(struct icmp6_hdr);

			if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(ip6)\n"));
				return (PF_DROP);
			}
			pd2.proto = h2_6.ip6_nxt;
			pd2.src = (struct pf_addr *)&h2_6.ip6_src;
			pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
			pd2.ip_sum = NULL;
			off2 = ipoff2 + sizeof(h2_6);
			do {
				switch (pd2.proto) {
				case IPPROTO_FRAGMENT:
					/*
					 * ICMPv6 error messages for
					 * non-first fragments
					 */
					REASON_SET(reason, PFRES_FRAG);
					return (PF_DROP);
				case IPPROTO_AH:
				case IPPROTO_HOPOPTS:
				case IPPROTO_ROUTING:
				case IPPROTO_DSTOPTS: {
					/* get next header and header length */
					struct ip6_ext opt6;

					if (!pf_pull_hdr(m, off2, &opt6,
					    sizeof(opt6), NULL, reason,
					    pd2.af)) {
						DPFPRINTF(PF_DEBUG_MISC,
						    ("pf: ICMPv6 short opt\n"));
						return (PF_DROP);
					}
					if (pd2.proto == IPPROTO_AH)
						off2 += (opt6.ip6e_len + 2) * 4;
					else
						off2 += (opt6.ip6e_len + 1) * 8;
					pd2.proto = opt6.ip6e_nxt;
					/* goto the next header */
					break;
				}
				default:
					terminal++;
					break;
				}
			} while (!terminal);
			break;
#endif /* INET6 */
		default:
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: ICMP AF %d unknown (ip6)\n", pd->af));
			return (PF_DROP);
			break;
		}

		switch (pd2.proto) {
		case IPPROTO_TCP: {
			struct tcphdr th;
			u_int32_t seq;
			struct pf_state_peer *src, *dst;
			u_int8_t dws;
			int copyback = 0;

			/*
			 * Only the first 8 bytes of the TCP header can be
			 * expected.  Don't access any TCP header fields after
			 * th_seq; an ackskew test is not possible.
			 */
			if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
			    pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(tcp)\n"));
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_TCP;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[pd2.sidx] = th.th_sport;
			key.port[pd2.didx] = th.th_dport;

			STATE_LOOKUP(kif, &key, direction, *state, m);

			if (direction == (*state)->direction) {
				src = &(*state)->dst;
				dst = &(*state)->src;
			} else {
				src = &(*state)->src;
				dst = &(*state)->dst;
			}

			if (src->wscale && dst->wscale)
				dws = dst->wscale & PF_WSCALE_MASK;
			else
				dws = 0;

			/* Demodulate sequence number */
			seq = ntohl(th.th_seq) - src->seqdiff;
			if (src->seqdiff) {
				pf_change_a(&th.th_seq, icmpsum,
				    htonl(seq), 0);
				copyback = 1;
			}

			if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
			    (!SEQ_GEQ(src->seqhi, seq) ||
			    !SEQ_GEQ(seq, src->seqlo -
			    (dst->max_win << dws)))) {
				if (pf_status.debug >= PF_DEBUG_MISC) {
					kprintf("pf: BAD ICMP %d:%d ",
					    icmptype, pd->hdr.icmp->icmp_code);
					pf_print_host(pd->src, 0, pd->af);
					kprintf(" -> ");
					pf_print_host(pd->dst, 0, pd->af);
					kprintf(" state: ");
					pf_print_state(*state);
					kprintf(" seq=%u\n", seq);
				}
				REASON_SET(reason, PFRES_BADSTATE);
				return (PF_DROP);
			} else {
				if (pf_status.debug >= PF_DEBUG_MISC) {
					kprintf("pf: OK ICMP %d:%d ",
					    icmptype, pd->hdr.icmp->icmp_code);
					pf_print_host(pd->src, 0, pd->af);
					kprintf(" -> ");
					pf_print_host(pd->dst, 0, pd->af);
					kprintf(" state: ");
					pf_print_state(*state);
					kprintf(" seq=%u\n", seq);
				}
			}

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != th.th_sport)
					pf_change_icmp(pd2.src, &th.th_sport,
					    daddr, &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != th.th_dport)
					pf_change_icmp(pd2.dst, &th.th_dport,
					    NULL, /* XXX Inbound NAT? */
					    &nk->addr[pd2.didx],
					    nk->port[pd2.didx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);
				copyback = 1;
			}

			if (copyback) {
				switch (pd2.af) {
#ifdef INET
				case AF_INET:
					m_copyback(m, off, ICMP_MINLEN,
					    (caddr_t)pd->hdr.icmp);
					m_copyback(m, ipoff2, sizeof(h2),
					    (caddr_t)&h2);
					break;
#endif /* INET */
#ifdef INET6
				case AF_INET6:
					m_copyback(m, off,
					    sizeof(struct icmp6_hdr),
					    (caddr_t)pd->hdr.icmp6);
					m_copyback(m, ipoff2, sizeof(h2_6),
					    (caddr_t)&h2_6);
					break;
#endif /* INET6 */
				}
				m_copyback(m, off2, 8, (caddr_t)&th);
			}

			return (PF_PASS);
			break;
		}
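
		/*
		 * Editor's note: pf_change_icmp() has to keep several
		 * checksums consistent at once -- the quoted inner IP
		 * header's checksum (pd2.ip_sum), the quoted transport
		 * checksum when one is reachable (the &uh.uh_sum argument in
		 * the UDP case below), the outer ICMP checksum covering the
		 * quoted bytes, and the outer IP header checksum -- which is
		 * why every rewrite is followed by m_copyback() of both the
		 * ICMP header and the embedded headers.
		 */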
		case IPPROTO_UDP: {
			struct udphdr uh;

			if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(udp)\n"));
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_UDP;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[pd2.sidx] = uh.uh_sport;
			key.port[pd2.didx] = uh.uh_dport;

			STATE_LOOKUP(kif, &key, direction, *state, m);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != uh.uh_sport)
					pf_change_icmp(pd2.src, &uh.uh_sport,
					    daddr, &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], &uh.uh_sum,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 1, pd2.af);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != uh.uh_dport)
					pf_change_icmp(pd2.dst, &uh.uh_dport,
					    NULL, /* XXX Inbound NAT? */
					    &nk->addr[pd2.didx],
					    nk->port[pd2.didx], &uh.uh_sum,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 1, pd2.af);

				switch (pd2.af) {
#ifdef INET
				case AF_INET:
					m_copyback(m, off, ICMP_MINLEN,
					    (caddr_t)pd->hdr.icmp);
					m_copyback(m, ipoff2, sizeof(h2),
					    (caddr_t)&h2);
					break;
#endif /* INET */
#ifdef INET6
				case AF_INET6:
					m_copyback(m, off,
					    sizeof(struct icmp6_hdr),
					    (caddr_t)pd->hdr.icmp6);
					m_copyback(m, ipoff2, sizeof(h2_6),
					    (caddr_t)&h2_6);
					break;
#endif /* INET6 */
				}
				m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
			}

			return (PF_PASS);
			break;
		}
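
		/*
		 * Editor's note: for the ICMP-in-ICMP cases that follow,
		 * there are no ports to key on, so the echo identifier of
		 * the quoted query is stored in the state key's port slots
		 * and matched the same way a port pair would be.
		 */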
#ifdef INET
		case IPPROTO_ICMP: {
			struct icmp iih;

			if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(icmp)\n"));
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_ICMP;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[0] = key.port[1] = iih.icmp_id;

			STATE_LOOKUP(kif, &key, direction, *state, m);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != iih.icmp_id)
					pf_change_icmp(pd2.src, &iih.icmp_id,
					    daddr, &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != iih.icmp_id)
					pf_change_icmp(pd2.dst, &iih.icmp_id,
					    NULL, /* XXX Inbound NAT? */
					    &nk->addr[pd2.didx],
					    nk->port[pd2.didx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET);

				m_copyback(m, off, ICMP_MINLEN,
				    (caddr_t)pd->hdr.icmp);
				m_copyback(m, ipoff2, sizeof(h2),
				    (caddr_t)&h2);
				m_copyback(m, off2, ICMP_MINLEN,
				    (caddr_t)&iih);
			}
			return (PF_PASS);
			break;
		}
#endif /* INET */
#ifdef INET6
		case IPPROTO_ICMPV6: {
			struct icmp6_hdr iih;

			if (!pf_pull_hdr(m, off2, &iih,
			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(icmp6)\n"));
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_ICMPV6;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[0] = key.port[1] = iih.icmp6_id;

			STATE_LOOKUP(kif, &key, direction, *state, m);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != iih.icmp6_id)
					pf_change_icmp(pd2.src, &iih.icmp6_id,
					    daddr, &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET6);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != iih.icmp6_id)
					pf_change_icmp(pd2.dst, &iih.icmp6_id,
					    NULL, /* XXX Inbound NAT? */
					    &nk->addr[pd2.didx],
					    nk->port[pd2.didx], NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET6);

				m_copyback(m, off, sizeof(struct icmp6_hdr),
				    (caddr_t)pd->hdr.icmp6);
				m_copyback(m, ipoff2, sizeof(h2_6),
				    (caddr_t)&h2_6);
				m_copyback(m, off2, sizeof(struct icmp6_hdr),
				    (caddr_t)&iih);
			}

			return (PF_PASS);
			break;
		}
#endif /* INET6 */
		default: {
			key.af = pd2.af;
			key.proto = pd2.proto;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[0] = key.port[1] = 0;

			STATE_LOOKUP(kif, &key, direction, *state, m);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af))
					pf_change_icmp(pd2.src, NULL, daddr,
					    &nk->addr[pd2.sidx], 0, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af))
					pf_change_icmp(pd2.dst, NULL,
					    NULL, /* XXX Inbound NAT? */
					    &nk->addr[pd2.didx], 0, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);

				switch (pd2.af) {
#ifdef INET
				case AF_INET:
					m_copyback(m, off, ICMP_MINLEN,
					    (caddr_t)pd->hdr.icmp);
					m_copyback(m, ipoff2, sizeof(h2),
					    (caddr_t)&h2);
					break;
#endif /* INET */
#ifdef INET6
				case AF_INET6:
					m_copyback(m, off,
					    sizeof(struct icmp6_hdr),
					    (caddr_t)pd->hdr.icmp6);
					m_copyback(m, ipoff2, sizeof(h2_6),
					    (caddr_t)&h2_6);
					break;
#endif /* INET6 */
				}
			}
			return (PF_PASS);
			break;
		}
		}
	}
}
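
/*
 * Editor's note: pf_test_state_other below is the catch-all tracker for
 * protocols without port numbers (GRE, ESP, OSPF, ...).  States are keyed
 * on addresses and protocol alone, and the SINGLE/MULTIPLE peer states
 * simply record whether traffic has been seen in one or both directions.
 */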

int
pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
	struct mbuf *m, struct pf_pdesc *pd)
{
	struct pf_state_peer *src, *dst;
	struct pf_state_key_cmp key;

	key.af = pd->af;
	key.proto = pd->proto;
	if (direction == PF_IN) {
		PF_ACPY(&key.addr[0], pd->src, key.af);
		PF_ACPY(&key.addr[1], pd->dst, key.af);
		key.port[0] = key.port[1] = 0;
	} else {
		PF_ACPY(&key.addr[1], pd->src, key.af);
		PF_ACPY(&key.addr[0], pd->dst, key.af);
		key.port[1] = key.port[0] = 0;
	}

	STATE_LOOKUP(kif, &key, direction, *state, m);

	if (direction == (*state)->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	if (src->state < PFOTHERS_SINGLE)
		src->state = PFOTHERS_SINGLE;
	if (dst->state == PFOTHERS_SINGLE)
		dst->state = PFOTHERS_MULTIPLE;

	/* update expire time */
	(*state)->expire = time_second;
	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
		(*state)->timeout = PFTM_OTHER_MULTIPLE;
	else
		(*state)->timeout = PFTM_OTHER_SINGLE;

	/* translate source/destination address, if necessary */
	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
		struct pf_state_key *nk = (*state)->key[pd->didx];

		KKASSERT(nk);
		KKASSERT(pd);
		KKASSERT(pd->src);
		KKASSERT(pd->dst);
		switch (pd->af) {
#ifdef INET
		case AF_INET:
			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
				pf_change_a(&pd->src->v4.s_addr,
				    pd->ip_sum,
				    nk->addr[pd->sidx].v4.s_addr,
				    0);

			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
				pf_change_a(&pd->dst->v4.s_addr,
				    pd->ip_sum,
				    nk->addr[pd->didx].v4.s_addr,
				    0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);

			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
			break;
#endif /* INET6 */
		}
	}
	return (PF_PASS);
}

/*
 * ipoff and off are measured from the start of the mbuf chain.
 * h must be at "ipoff" on the mbuf chain.
 */
void *
pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
	u_short *actionp, u_short *reasonp, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3;

		if (fragoff) {
			if (fragoff >= len)
				ACTION_SET(actionp, PF_PASS);
			else {
				ACTION_SET(actionp, PF_DROP);
				REASON_SET(reasonp, PFRES_FRAG);
			}
			return (NULL);
		}
		if (m->m_pkthdr.len < off + len ||
		    h->ip_len < off + len) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return (NULL);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);

		if (m->m_pkthdr.len < off + len ||
		    (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
		    (unsigned)(off + len)) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return (NULL);
		}
		break;
	}
#endif /* INET6 */
	}
	m_copydata(m, off, len, p);
	return (p);
}
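
/*
 * Editor's note -- a minimal usage sketch for pf_pull_hdr(), mirroring the
 * callers above (variable names here are illustrative only):
 *
 *	struct tcphdr th;
 *	u_short action, reason;
 *
 *	if (!pf_pull_hdr(m, off, &th, sizeof(th), &action, &reason, AF_INET))
 *		return (action);	// reason holds a PFRES_* code
 *
 * On success the header bytes have been copied out of the (possibly
 * non-contiguous) mbuf chain into the caller's local buffer.
 */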

int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
{
	struct sockaddr_in *dst;
	int ret = 1;
	int check_mpath;
#ifdef INET6
	struct sockaddr_in6 *dst6;
	struct route_in6 ro;
#else
	struct route ro;
#endif
	struct radix_node *rn;
	struct rtentry *rt;
	struct ifnet *ifp;

	check_mpath = 0;
	bzero(&ro, sizeof(ro));
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		break;
#ifdef INET6
	case AF_INET6:
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		break;
#endif /* INET6 */
	default:
		return (0);
	}

	/* Skip checks for ipsec interfaces */
	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
		goto out;

	rtalloc_ign((struct route *)&ro, 0);

	if (ro.ro_rt != NULL) {
		/* No interface given, this is a no-route check */
		if (kif == NULL)
			goto out;

		if (kif->pfik_ifp == NULL) {
			ret = 0;
			goto out;
		}

		/* Perform uRPF check if passed input interface */
		ret = 0;
		rn = (struct radix_node *)ro.ro_rt;
		do {
			rt = (struct rtentry *)rn;
			ifp = rt->rt_ifp;

			if (kif->pfik_ifp == ifp)
				ret = 1;
			rn = NULL;
		} while (check_mpath == 1 && rn != NULL && ret == 0);
	} else
		ret = 0;
out:
	if (ro.ro_rt != NULL)
		RTFREE(ro.ro_rt);
	return (ret);
}

int
pf_rtlabel_match(struct pf_addr *addr, sa_family_t af,
	struct pf_addr_wrap *aw)
{
	struct sockaddr_in *dst;
#ifdef INET6
	struct sockaddr_in6 *dst6;
	struct route_in6 ro;
#else
	struct route ro;
#endif
	int ret = 0;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	bzero(&ro, sizeof(ro));
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		break;
#ifdef INET6
	case AF_INET6:
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		break;
#endif /* INET6 */
	default:
		return (0);
	}

	rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING));

	if (ro.ro_rt != NULL) {
		RTFREE(ro.ro_rt);
	}

	return (ret);
}
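
/*
 * Editor's note: route labels are an OpenBSD routing-table feature with no
 * DragonFly equivalent, so pf_rtlabel_match() above performs the route
 * lookup for its side effects only and always returns 0 (ret is never
 * modified) -- route-label matches therefore never succeed on this
 * platform.
 */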

#ifdef INET
void
pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
	struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf *m0, *m1;
	struct route iproute;
	struct route *ro = NULL;
	struct sockaddr_in *dst;
	struct ip *ip;
	struct ifnet *ifp = NULL;
	struct pf_addr naddr;
	struct pf_src_node *sn = NULL;
	int error = 0;
	int sw_csum;
#ifdef IPSEC
	struct m_tag *mtag;
#endif /* IPSEC */

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	if (m == NULL || *m == NULL || r == NULL ||
	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
		panic("pf_route: invalid parameters");

	if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
		(*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
		(*m)->m_pkthdr.pf.routed = 1;
	} else {
		if ((*m)->m_pkthdr.pf.routed++ > 3) {
			m0 = *m;
			*m = NULL;
			goto bad;
		}
	}

	if (r->rt == PF_DUPTO) {
		if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) {
			return;
		}
	} else {
		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
			return;
		}
		m0 = *m;
	}

	if (m0->m_len < sizeof(struct ip)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route: m0->m_len < sizeof(struct ip)\n"));
		goto bad;
	}

	ip = mtod(m0, struct ip *);

	ro = &iproute;
	bzero((caddr_t)ro, sizeof(*ro));
	dst = satosin(&ro->ro_dst);
	dst->sin_family = AF_INET;
	dst->sin_len = sizeof(*dst);
	dst->sin_addr = ip->ip_dst;

	if (r->rt == PF_FASTROUTE) {
		rtalloc(ro);
		if (ro->ro_rt == 0) {
			ipstat.ips_noroute++;
			goto bad;
		}

		ifp = ro->ro_rt->rt_ifp;
		ro->ro_rt->rt_use++;

		if (ro->ro_rt->rt_flags & RTF_GATEWAY)
			dst = satosin(ro->ro_rt->rt_gateway);
	} else {
		if (TAILQ_EMPTY(&r->rpool.list)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
			goto bad;
		}
		if (s == NULL) {
			pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
			    &naddr, NULL, &sn);
			if (!PF_AZERO(&naddr, AF_INET))
				dst->sin_addr.s_addr = naddr.v4.s_addr;
			ifp = r->rpool.cur->kif ?
			    r->rpool.cur->kif->pfik_ifp : NULL;
		} else {
			if (!PF_AZERO(&s->rt_addr, AF_INET))
				dst->sin_addr.s_addr =
				    s->rt_addr.v4.s_addr;
			ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
		}
	}
	if (ifp == NULL)
		goto bad;

	if (oifp != ifp) {
		if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
			goto bad;
		} else if (m0 == NULL) {
			goto done;
		}
		if (m0->m_len < sizeof(struct ip)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_route: m0->m_len < sizeof(struct ip)\n"));
			goto bad;
		}
		ip = mtod(m0, struct ip *);
	}

	/* Copied from FreeBSD 5.1-CURRENT ip_output. */
	m0->m_pkthdr.csum_flags |= CSUM_IP;
	sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
	if (sw_csum & CSUM_DELAY_DATA) {
		in_delayed_cksum(m0);
		sw_csum &= ~CSUM_DELAY_DATA;
	}
	m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
	m0->m_pkthdr.csum_iphlen = (ip->ip_hl << 2);

	if (ip->ip_len <= ifp->if_mtu ||
	    (ifp->if_hwassist & CSUM_FRAGMENT &&
	    (ip->ip_off & IP_DF) == 0)) {
		ip->ip_len = htons(ip->ip_len);
		ip->ip_off = htons(ip->ip_off);
		ip->ip_sum = 0;
		if (sw_csum & CSUM_DELAY_IP) {
			/* From KAME */
			if (ip->ip_v == IPVERSION &&
			    (ip->ip_hl << 2) == sizeof(*ip)) {
				ip->ip_sum = in_cksum_hdr(ip);
			} else {
				ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
			}
		}
		lwkt_reltoken(&pf_token);
		error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt);
		lwkt_gettoken(&pf_token);
		goto done;
	}

	/*
	 * Too large for interface; fragment if possible.
	 * Must be able to put at least 8 bytes per fragment.
	 */
	if (ip->ip_off & IP_DF) {
		ipstat.ips_cantfrag++;
		if (r->rt != PF_DUPTO) {
			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
			    ifp->if_mtu);
			goto done;
		} else
			goto bad;
	}

	m1 = m0;
	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
	if (error) {
		goto bad;
	}

	for (m0 = m1; m0; m0 = m1) {
		m1 = m0->m_nextpkt;
		m0->m_nextpkt = 0;
		if (error == 0) {
			lwkt_reltoken(&pf_token);
			error = (*ifp->if_output)(ifp, m0, sintosa(dst),
			    NULL);
			lwkt_gettoken(&pf_token);
		} else
			m_freem(m0);
	}

	if (error == 0)
		ipstat.ips_fragmented++;

done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	if (ro == &iproute && ro->ro_rt)
		RTFREE(ro->ro_rt);
	return;

bad:
	m_freem(m0);
	goto done;
}
#endif /* INET */
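
/*
 * Editor's note: pf_route6 below mirrors pf_route for IPv6, with two
 * differences visible in the code: PF_FASTROUTE hands the packet straight
 * to ip6_output() instead of doing its own route lookup, and there is no
 * fragmentation path -- an oversized packet is answered with an
 * ICMP6_PACKET_TOO_BIG error instead.
 */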

#ifdef INET6
void
pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
	struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf *m0;
	struct route_in6 ip6route;
	struct route_in6 *ro;
	struct sockaddr_in6 *dst;
	struct ip6_hdr *ip6;
	struct ifnet *ifp = NULL;
	struct pf_addr naddr;
	struct pf_src_node *sn = NULL;

	if (m == NULL || *m == NULL || r == NULL ||
	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
		panic("pf_route6: invalid parameters");

	if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
		(*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
		(*m)->m_pkthdr.pf.routed = 1;
	} else {
		if ((*m)->m_pkthdr.pf.routed++ > 3) {
			m0 = *m;
			*m = NULL;
			goto bad;
		}
	}

	if (r->rt == PF_DUPTO) {
		if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL)
			return;
	} else {
		if ((r->rt == PF_REPLYTO) == (r->direction == dir))
			return;
		m0 = *m;
	}

	if (m0->m_len < sizeof(struct ip6_hdr)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
		goto bad;
	}
	ip6 = mtod(m0, struct ip6_hdr *);

	ro = &ip6route;
	bzero((caddr_t)ro, sizeof(*ro));
	dst = (struct sockaddr_in6 *)&ro->ro_dst;
	dst->sin6_family = AF_INET6;
	dst->sin6_len = sizeof(*dst);
	dst->sin6_addr = ip6->ip6_dst;

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 *
	 * Cheat. XXX why only in the v6 case???
	 */
	if (r->rt == PF_FASTROUTE) {
		m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
		m0->m_pkthdr.pf.flags = 0;
		/* XXX Re-Check when Upgrading to > 4.4 */
		m0->m_pkthdr.pf.statekey = NULL;
		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
		return;
	}

	if (TAILQ_EMPTY(&r->rpool.list)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
		goto bad;
	}
	if (s == NULL) {
		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
		    &naddr, NULL, &sn);
		if (!PF_AZERO(&naddr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
			    &naddr, AF_INET6);
		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
	} else {
		if (!PF_AZERO(&s->rt_addr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
			    &s->rt_addr, AF_INET6);
		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
	}
	if (ifp == NULL)
		goto bad;

	if (oifp != ifp) {
		if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
			goto bad;
		} else if (m0 == NULL) {
			goto done;
		}
		if (m0->m_len < sizeof(struct ip6_hdr)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_route6: m0->m_len < "
			    "sizeof(struct ip6_hdr)\n"));
			goto bad;
		}
		ip6 = mtod(m0, struct ip6_hdr *);
	}

	/*
	 * If the packet is too large for the outgoing interface,
	 * send back an icmp6 error.
	 */
	if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr))
		dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
		nd6_output(ifp, ifp, m0, dst, NULL);
	} else {
		in6_ifstat_inc(ifp, ifs6_in_toobig);
		if (r->rt != PF_DUPTO)
			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
		else
			goto bad;
	}

done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	return;

bad:
	m_freem(m0);
	goto done;
}
#endif /* INET6 */

/*
 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
 * off is the offset where the protocol header starts
 * len is the total length of protocol header plus payload
 * returns 0 when the checksum is valid, otherwise returns 1.
 */
/*
 * XXX
 * FreeBSD supports cksum offload for the following drivers.
 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4)
 * If we can make full use of it we would outperform ipfw/ipfilter in
 * very heavy traffic.
 * I have not tested 'cause I don't have NICs that support cksum offload.
 * (There might be problems. Typical phenomena would be
 *  1. No route message for UDP packet.
 *  2. No connection acceptance from external hosts regardless of rule set.)
 */
int
pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
	sa_family_t af)
{
	u_int16_t sum = 0;
	int hw_assist = 0;
	struct ip *ip;

	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
		return (1);
	if (m->m_pkthdr.len < off + len)
		return (1);

	switch (p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
				sum = m->m_pkthdr.csum_data;
			} else {
				ip = mtod(m, struct ip *);
				sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + p));
			}
			sum ^= 0xffff;
			++hw_assist;
		}
		break;
	case IPPROTO_ICMP:
#ifdef INET6
	case IPPROTO_ICMPV6:
#endif /* INET6 */
		break;
	default:
		return (1);
	}

	if (!hw_assist) {
		switch (af) {
		case AF_INET:
			if (p == IPPROTO_ICMP) {
				if (m->m_len < off)
					return (1);
				m->m_data += off;
				m->m_len -= off;
				sum = in_cksum(m, len);
				m->m_data -= off;
				m->m_len += off;
			} else {
				if (m->m_len < sizeof(struct ip))
					return (1);
				sum = in_cksum_range(m, p, off, len);
				if (sum == 0) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID |
					    CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
			break;
#ifdef INET6
		case AF_INET6:
			if (m->m_len < sizeof(struct ip6_hdr))
				return (1);
			sum = in6_cksum(m, p, off, len);
			/*
			 * XXX
			 * IPv6 H/W cksum off-load not supported yet!
			 *
			 * if (sum == 0) {
			 *	m->m_pkthdr.csum_flags |=
			 *	    (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
			 *	m->m_pkthdr.csum_data = 0xffff;
			 * }
			 */
			break;
#endif /* INET6 */
		default:
			return (1);
		}
	}
	if (sum) {
		switch (p) {
		case IPPROTO_TCP:
			tcpstat.tcps_rcvbadsum++;
			break;
		case IPPROTO_UDP:
			udp_stat.udps_badsum++;
			break;
		case IPPROTO_ICMP:
			icmpstat.icps_checksum++;
			break;
#ifdef INET6
		case IPPROTO_ICMPV6:
			icmp6stat.icp6s_checksum++;
			break;
#endif /* INET6 */
		}
		return (1);
	}
	return (0);
}

struct pf_divert *
pf_find_divert(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
		return (NULL);

	return ((struct pf_divert *)(mtag + 1));
}

struct pf_divert *
pf_get_divert(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
		mtag = m_tag_get(PACKET_TAG_PF_DIVERT,
		    sizeof(struct pf_divert), M_NOWAIT);
		if (mtag == NULL)
			return (NULL);
		bzero(mtag + 1, sizeof(struct pf_divert));
		m_tag_prepend(m, mtag);
	}

	return ((struct pf_divert *)(mtag + 1));
}

#ifdef INET
int
pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
	struct ether_header *eh, struct inpcb *inp)
{
	struct pfi_kif *kif;
	u_short action, reason = 0, log = 0;
	struct mbuf *m = *m0;
	struct ip *h = NULL;
	struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state *s = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_pdesc pd;
	int off, dirndx;
#ifdef ALTQ
	int pqid = 0;
#endif

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
#ifdef foo
	if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
		kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
	else
#endif
		kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test");
#endif /* DIAGNOSTIC */

	if (m->m_pkthdr.len < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
		return (PF_PASS);
	m->m_pkthdr.pf.flags = 0;
	/* Re-Check when updating to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip *);

	off = h->ip_hl << 2;
	if (off < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = &h->ip_sum;
	pd.proto_sum = NULL;
	pd.proto = h->ip_p;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET;
	pd.tos = h->ip_tos;
	pd.tot_len = h->ip_len;
	pd.eh = eh;

	/* handle fragments that didn't get reassembled by normalization */
	if (h->ip_off & (IP_MF | IP_OFFMASK)) {
		action = pf_test_fragment(&r, dir, kif, m, h,
		    &pd, &a, &ruleset);
		goto done;
	}

	switch (h->ip_p) {
	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
#ifdef ALTQ
		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
			pqid = 1;
#endif
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}
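
	/*
	 * Editor's note: the pqid flag set above marks pure ACKs (TH_ACK
	 * with no payload) so that the ALTQ block in the done: section can
	 * steer them -- together with IPTOS_LOWDELAY traffic -- into the
	 * rule's priority queue (r->pqid) instead of its data queue
	 * (r->qid), the usual trick for keeping upstream ACKs flowing on an
	 * asymmetric link.
	 */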

	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_ICMP: {
		struct icmp ih;

		pd.hdr.icmp = &ih;
		if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, NULL, inp);
		break;
	}

done:
	if (action == PF_PASS && h->ip_hl > 5 &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with ip options\n"));
	}

	if ((s && s->tag) || r->rtableid)
		pf_tag_packet(m, s ? s->tag : 0, r->rtableid);

#if 0
	if (dir == PF_IN && s && s->key[PF_SK_STACK])
		m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#endif

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		if (pqid || (pd.tos & IPTOS_LOWDELAY))
			m->m_pkthdr.pf.qid = r->pqid;
		else
			m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = AF_INET;
		m->m_pkthdr.pf.hdr = h;
		/* add connection hash for fairq */
		if (s) {
			/* for fairq */
			m->m_pkthdr.pf.state_hash = s->hash;
			m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
		}
	}
#endif /* ALTQ */

	/*
	 * connections redirected to loopback should not match sockets
	 * bound specifically to loopback due to security implications,
	 * see tcp_input() and in_pcblookup_listen().
	 */
	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
		m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (dir == PF_IN && action == PF_PASS && r->divert.port) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(m))) {
			m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->port = r->divert.port;
			divert->addr.ipv4 = r->divert.addr.v4;
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_OUT)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_IN)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route can free the mbuf causing *m0 to become NULL */
		pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET */

#ifdef INET6
int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct inpcb *inp)
{
	struct pfi_kif		*kif;
	u_short			 action, reason = 0, log = 0;
	struct mbuf		*m = *m0, *n = NULL;
	struct ip6_hdr		*h = NULL;
	struct pf_rule		*a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state		*s = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_pdesc		 pd;
	int			 off, terminal = 0, dirndx, rh_cnt = 0;

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
#ifdef foo
	if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
		kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
	else
#endif
	kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test6");
#endif /* DIAGNOSTIC */

	if (m->m_pkthdr.len < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
		return (PF_PASS);
	m->m_pkthdr.pf.flags = 0;
	/* Re-Check when updating to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip6_hdr *);

#if 1
	/*
	 * we do not support jumbograms yet.  if we keep going, a zero
	 * ip6_plen will do something bad, so drop the packet for now.
	 */
	if (htons(h->ip6_plen) == 0) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
		goto done;
	}
#endif
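
	/*
	 * Fill in the packet descriptor.  pd.src/pd.dst point straight
	 * into the IPv6 header inside the mbuf; sidx/didx record which
	 * side of a state key corresponds to the packet's source and
	 * destination, which flips with the packet's direction.
	 */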
	pd.src = (struct pf_addr *)&h->ip6_src;
	pd.dst = (struct pf_addr *)&h->ip6_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = NULL;
	pd.proto_sum = NULL;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET6;
	pd.tos = 0;
	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	pd.eh = eh;

	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
	pd.proto = h->ip6_nxt;
	do {
		switch (pd.proto) {
		case IPPROTO_FRAGMENT:
			action = pf_test_fragment(&r, dir, kif, m, h,
			    &pd, &a, &ruleset);
			if (action == PF_DROP)
				REASON_SET(&reason, PFRES_FRAG);
			goto done;
		case IPPROTO_ROUTING: {
			struct ip6_rthdr rthdr;

			if (rh_cnt++) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 more than one rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
			    &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				log = 1;
				goto done;
			}
			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 rthdr0\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			/* FALLTHROUGH */
		}
		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext	opt6;

			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH)
				off += (opt6.ip6e_len + 2) * 4;
			else
				off += (opt6.ip6e_len + 1) * 8;
			pd.proto = opt6.ip6e_nxt;
			/* go to the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);

	/* if there's no routing header, use unmodified mbuf for checksumming */
	if (!n)
		n = m;
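
	/*
	 * Dispatch on the final (post-extension-header) protocol.  Each
	 * case first tries to match an existing state; only when no
	 * state exists (s == NULL) is the ruleset evaluated through
	 * pf_test_rule().  A state match refreshes pfsync and inherits
	 * the rule, anchor and log settings recorded in the state.
	 */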
	switch (pd.proto) {

	case IPPROTO_TCP: {
		struct tcphdr	th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr	uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_ICMPV6: {
		struct icmp6_hdr	ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif,
		    m, off, h, &pd, &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, NULL, inp);
		break;
	}

done:
	if (n != m) {
		m_freem(n);
		n = NULL;
	}

	/* handle dangerous IPv6 extension headers. */
	if (action == PF_PASS && rh_cnt &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with dangerous v6 headers\n"));
	}

	if ((s && s->tag) || r->rtableid)
		pf_tag_packet(m, s ? s->tag : 0, r->rtableid);

#if 0
	if (dir == PF_IN && s && s->key[PF_SK_STACK])
		m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#endif

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		if (pd.tos & IPTOS_LOWDELAY)
			m->m_pkthdr.pf.qid = r->pqid;
		else
			m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = AF_INET6;
		m->m_pkthdr.pf.hdr = h;
		if (s) {
			/* for fairq */
			m->m_pkthdr.pf.state_hash = s->hash;
			m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
		}
	}
#endif /* ALTQ */

	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
		m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (dir == PF_IN && action == PF_PASS && r->divert.port) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(m))) {
			m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->port = r->divert.port;
			divert->addr.ipv6 = r->divert.addr.v6;
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->addr[0],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->addr[1],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}
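
	/*
	 * PF_SYNPROXY_DROP: the synproxy code (which answers the TCP
	 * handshake on the firewall's behalf) has consumed this packet,
	 * so free the mbuf, clear *m0 to tell the caller it is gone,
	 * and report PF_PASS.  route-to rules instead hand the mbuf to
	 * pf_route6(), which may likewise free it.
	 */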
	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route6 can free the mbuf causing *m0 to become NULL */
		pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET6 */

int
pf_check_congestion(struct ifqueue *ifq)
{
	return (0);
}