/*	$OpenBSD: pf.c,v 1.614 2008/08/02 12:34:37 henning Exp $ */

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <vm/vm_zone.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <machine/inttypes.h>

#include <sys/md5.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/netisr.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <netinet/if_ether.h>

#include <net/pf/pfvar.h>
#include <net/pf/if_pflog.h>

#include <net/pf/if_pfsync.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <sys/in_cksum.h>
#include <sys/ucred.h>
#include <machine/limits.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>

extern int ip_optcopy(struct ip *, struct ip *);
extern int debug_pfugidhack;

struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);

#define DPFPRINTF(n, x)	if (pf_status.debug >= (n)) kprintf x

/*
 * Global variables
 */

/* mask radix tree */
struct radix_node_head	*pf_maskhead;

/* state tables */
struct pf_state_tree	 pf_statetbl;

struct pf_altqqueue	 pf_altqs[2];
struct pf_palist	 pf_pabuf;
struct pf_altqqueue	*pf_altqs_active;
struct pf_altqqueue	*pf_altqs_inactive;
struct pf_status	 pf_status;

u_int32_t		 ticket_altqs_active;
u_int32_t		 ticket_altqs_inactive;
int			 altqs_inactive_open;
u_int32_t		 ticket_pabuf;

MD5_CTX			 pf_tcp_secret_ctx;
u_char			 pf_tcp_secret[16];
int			 pf_tcp_secret_init;
int			 pf_tcp_iss_off;

struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_rule		*r;
	struct pf_anchor_node	*parent;
	struct pf_anchor	*child;
} pf_anchor_stack[64];

vm_zone_t		 pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
vm_zone_t		 pf_state_pl, pf_state_key_pl, pf_state_item_pl;
vm_zone_t		 pf_altq_pl;

void			 pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

void			 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
void			 pf_add_threshold(struct pf_threshold *);
int			 pf_check_threshold(struct pf_threshold *);

void			 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
int			 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
#ifdef INET6
void			 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
#endif /* INET6 */
void			 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
void			 pf_send_tcp(const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ether_header *, struct ifnet *);
void			 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
struct pf_rule		*pf_match_translation(struct pf_pdesc *, struct mbuf *,
			    int, int, struct pfi_kif *,
			    struct pf_addr *, u_int16_t, struct pf_addr *,
			    u_int16_t, int);
struct pf_rule		*pf_get_translation(struct pf_pdesc *, struct mbuf *,
			    int, int, struct pfi_kif *, struct pf_src_node **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t);
void			 pf_detach_state(struct pf_state *);
int			 pf_state_key_setup(struct pf_pdesc *, struct pf_rule *,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t);
void			 pf_state_key_detach(struct pf_state *, int);
u_int32_t		 pf_tcp_iss(struct pf_pdesc *);
int			 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct ifqueue *,
			    struct inpcb *);
static __inline int	 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
int			 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
int			 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
int			 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
int			 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
int			 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
int			 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
int			 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
void			 pf_step_into_anchor(int *, struct pf_ruleset **, int,
			    struct pf_rule **, struct pf_rule **, int *);
int			 pf_step_out_of_anchor(int *, struct pf_ruleset **,
			    int, struct pf_rule **, struct pf_rule **,
			    int *);
void			 pf_hash(struct pf_addr *, struct pf_addr *,
			    struct pf_poolhashkey *, sa_family_t);
int			 pf_map_addr(u_int8_t, struct pf_rule *,
			    struct pf_addr *, struct pf_addr *,
			    struct pf_addr *, struct pf_src_node **);
int			 pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    struct pf_addr *, u_int16_t *, u_int16_t, u_int16_t,
			    struct pf_src_node **);
void			 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *,
			    struct pf_state *,
			    struct pf_pdesc *);
void			 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
u_int16_t		 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
u_int16_t		 pf_calc_mss(struct pf_addr *, sa_family_t,
			    u_int16_t);
void			 pf_set_rt_ifp(struct pf_state *,
			    struct pf_addr *);
int			 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
struct pf_divert	*pf_get_divert(struct mbuf *);
void			 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
int			 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
struct pf_state		*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int, struct mbuf *);
int			 pf_src_connlimit(struct pf_state **);
int			 pf_check_congestion(struct ifqueue *);

extern int pf_end_threads;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl, PFSTATE_HIWAT },
	{ &pf_src_tree_pl, PFSNODE_HIWAT },
	{ &pf_frent_pl, PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl, PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl, PFR_KENTRY_HIWAT }
};

#define STATE_LOOKUP(i, k, d, s, m)					\
	do {								\
		s = pf_find_state(i, k, d, m);				\
		if (s == NULL || (s)->timeout == PFTM_PURGE)		\
			return (PF_DROP);				\
		if (d == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != i)					\
			return (PF_PASS);				\
	} while (0)

#define BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
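/*
 * Editor's note (illustrative sketch, not part of the original source):
 * BOUND_IFACE decides whether a new state is interface-bound or floating.
 * For a rule without "if-bound", it evaluates to pfi_all, so the state
 * matches on any interface.  A minimal usage sketch, assuming a rule `r`
 * and the receiving kif `kif`:
 *
 *	s->kif = BOUND_IFACE(r, kif);	// kif if PFRULE_IFBOUND, else pfi_all
 */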
#define STATE_INC_COUNTERS(s)				\
	do {						\
		s->rule.ptr->states_cur++;		\
		s->rule.ptr->states_tot++;		\
		if (s->anchor.ptr != NULL) {		\
			s->anchor.ptr->states_cur++;	\
			s->anchor.ptr->states_tot++;	\
		}					\
		if (s->nat_rule.ptr != NULL) {		\
			s->nat_rule.ptr->states_cur++;	\
			s->nat_rule.ptr->states_tot++;	\
		}					\
	} while (0)

#define STATE_DEC_COUNTERS(s)				\
	do {						\
		if (s->nat_rule.ptr != NULL)		\
			s->nat_rule.ptr->states_cur--;	\
		if (s->anchor.ptr != NULL)		\
			s->anchor.ptr->states_cur--;	\
		s->rule.ptr->states_cur--;		\
	} while (0)

static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
static __inline int pf_state_compare_key(struct pf_state_key *,
	struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
	struct pf_state *);

struct pf_src_tree tree_src_tracking;

struct pf_state_tree_id tree_id;
struct pf_state_queue state_list;

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
RB_GENERATE(pf_state_tree_id, pf_state,
    entry_id, pf_state_compare_id);

static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
	int	diff;

	if (a->rule.ptr > b->rule.ptr)
		return (1);
	if (a->rule.ptr < b->rule.ptr)
		return (-1);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr.addr32[3] > b->addr.addr32[3])
			return (1);
		if (a->addr.addr32[3] < b->addr.addr32[3])
			return (-1);
		if (a->addr.addr32[2] > b->addr.addr32[2])
			return (1);
		if (a->addr.addr32[2] < b->addr.addr32[2])
			return (-1);
		if (a->addr.addr32[1] > b->addr.addr32[1])
			return (1);
		if (a->addr.addr32[1] < b->addr.addr32[1])
			return (-1);
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}
	return (0);
}

u_int32_t
pf_state_hash(struct pf_state_key *sk)
{
	u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15));

	if (hv == 0)	/* disallow 0 */
		hv = 1;
	return (hv);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */

void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_second;
}

void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_second, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
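/*
 * Editor's note (worked example, not part of the original source): the
 * threshold counter is fixed-point, scaled by PF_THRESHOLD_MULT so the
 * linear decay above survives integer division.  Assuming
 * PF_THRESHOLD_MULT is 1000 and a rule with "max-src-conn-rate 10/5"
 * (10 hits per 5 seconds), limit becomes 10000.  Each
 * pf_add_threshold() adds 1000; with diff = 2 of the 5 seconds elapsed,
 * a count of 8000 first decays by 8000 * 2 / 5 = 3200 to 4800 before
 * the new hit is added.  pf_check_threshold() then just compares the
 * scaled count against the scaled limit.
 */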
int
pf_src_connlimit(struct pf_state **state)
{
	int bad = 0;

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	if ((*state)->rule.ptr->overload_tbl) {
		struct pfr_addr	 p;
		u_int32_t	 killed = 0;

		pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf_src_connlimit: blocking address ");
			pf_print_host(&(*state)->src_node->addr, 0,
			    (*state)->key[PF_SK_WIRE]->af);
		}

		bzero(&p, sizeof(p));
		p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
		switch ((*state)->key[PF_SK_WIRE]->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = (*state)->src_node->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = (*state)->src_node->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
		    &p, time_second);

		/* kill existing states if that's required. */
		if ((*state)->rule.ptr->flush) {
			struct pf_state_key *sk;
			struct pf_state *st;

			pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
			RB_FOREACH(st, pf_state_tree_id, &tree_id) {
				sk = st->key[PF_SK_WIRE];
				/*
				 * Kill states from this source.  (Only those
				 * from the same rule if PF_FLUSH_GLOBAL is not
				 * set)
				 */
				if (sk->af ==
				    (*state)->key[PF_SK_WIRE]->af &&
				    (((*state)->direction == PF_OUT &&
				    PF_AEQ(&(*state)->src_node->addr,
				    &sk->addr[0], sk->af)) ||
				    ((*state)->direction == PF_IN &&
				    PF_AEQ(&(*state)->src_node->addr,
				    &sk->addr[1], sk->af))) &&
				    ((*state)->rule.ptr->flush &
				    PF_FLUSH_GLOBAL ||
				    (*state)->rule.ptr == st->rule.ptr)) {
					st->timeout = PFTM_PURGE;
					st->src.state = st->dst.state =
					    TCPS_CLOSED;
					killed++;
				}
			}
			if (pf_status.debug >= PF_DEBUG_MISC)
				kprintf(", %u states killed", killed);
		}
		if (pf_status.debug >= PF_DEBUG_MISC)
			kprintf("\n");
	}

	/* kill this state */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
	return (1);
}
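/*
 * Editor's note (hypothetical illustration, not part of the original
 * source): the overload_tbl/flush handling above is what backs pf.conf
 * rules of roughly this shape:
 *
 *	table <bruteforce> persist
 *	block quick from <bruteforce>
 *	pass in proto tcp to port ssh keep state \
 *	    (max-src-conn 10, max-src-conn-rate 5/30, \
 *	    overload <bruteforce> flush global)
 *
 * "overload" drives the pfr_insert_kentry() call above, and "flush
 * global" sets PF_FLUSH_GLOBAL so states created by other rules are
 * killed as well.
 */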
int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{
	struct pf_src_node	k;

	if (*sn == NULL) {
		k.af = af;
		PF_ACPY(&k.addr, src, af);
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = rule;
		else
			k.rule.ptr = NULL;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
	}
	if (*sn == NULL) {
		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
		else
			pf_status.lcounters[LCNT_SRCNODES]++;
		if ((*sn) == NULL)
			return (-1);

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			(*sn)->rule.ptr = rule;
		else
			(*sn)->rule.ptr = NULL;
		PF_ACPY(&(*sn)->addr, src, af);
		if (RB_INSERT(pf_src_tree,
		    &tree_src_tracking, *sn) != NULL) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				kprintf("pf: src_tree insert failed: ");
				pf_print_host(&(*sn)->addr, 0, af);
				kprintf("\n");
			}
			pool_put(&pf_src_tree_pl, *sn);
			return (-1);
		}
		(*sn)->creation = time_second;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			(*sn)->rule.ptr->src_nodes++;
		pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		pf_status.src_nodes++;
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			pf_status.lcounters[LCNT_SRCSTATES]++;
			return (-1);
		}
	}
	return (0);
}

/* state table stuff */
static __inline int
pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
{
	int	diff;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
			return (1);
		if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
			return (-1);
		if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
			return (1);
		if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
			return (-1);
		if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
			return (1);
		if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
			return (-1);
		if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
			return (1);
		if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
			return (-1);
		if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
			return (1);
		if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
			return (-1);
		if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
			return (1);
		if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
			return (-1);
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}

	if ((diff = a->port[0] - b->port[0]) != 0)
		return (diff);
	if ((diff = a->port[1] - b->port[1]) != 0)
		return (diff);

	return (0);
}

static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
	if (a->id > b->id)
		return (1);
	if (a->id < b->id)
		return (-1);
	if (a->creatorid > b->creatorid)
		return (1);
	if (a->creatorid < b->creatorid)
		return (-1);

	return (0);
}

int
pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
{
	struct pf_state_item	*si;
	struct pf_state_key	*cur;

	KKASSERT(s->key[idx] == NULL);	/* XXX handle this? */

	if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
		/* key exists. check for same kif, if none, add to key */
		TAILQ_FOREACH(si, &cur->states, entry)
			if (si->s->kif == s->kif &&
			    si->s->direction == s->direction) {
				if (pf_status.debug >= PF_DEBUG_MISC) {
					kprintf(
					    "pf: %s key attach failed on %s: ",
					    (idx == PF_SK_WIRE) ?
					    "wire" : "stack",
					    s->kif->pfik_name);
					pf_print_state_parts(s,
					    (idx == PF_SK_WIRE) ? sk : NULL,
					    (idx == PF_SK_STACK) ? sk : NULL);
					kprintf("\n");
				}
				pool_put(&pf_state_key_pl, sk);
				return (-1);	/* collision! */
			}
		pool_put(&pf_state_key_pl, sk);
		s->key[idx] = cur;
	} else
		s->key[idx] = sk;

	if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
		pf_state_key_detach(s, idx);
		return (-1);
	}
	si->s = s;

	/* list is sorted, if-bound states before floating */
	if (s->kif == pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
	return (0);
}
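/*
 * Editor's note (illustration, not part of the original source): every
 * state carries two keys.  PF_SK_WIRE describes the packet as seen on
 * the wire, PF_SK_STACK as seen by the local stack; they differ only
 * when NAT rewrites the packet.  For an outbound connection
 * 10.0.0.5:1234 -> 198.51.100.1:80 source-NATed to 192.0.2.1:5678, the
 * stack key holds 10.0.0.5:1234 -> 198.51.100.1:80 while the wire key
 * holds 192.0.2.1:5678 -> 198.51.100.1:80.  Without NAT both key[]
 * slots point at the same pf_state_key, which is why pf_detach_state()
 * below NULLs PF_SK_WIRE first when the two are identical.
 */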
void
pf_detach_state(struct pf_state *s)
{
	if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
		s->key[PF_SK_WIRE] = NULL;

	if (s->key[PF_SK_STACK] != NULL)
		pf_state_key_detach(s, PF_SK_STACK);

	if (s->key[PF_SK_WIRE] != NULL)
		pf_state_key_detach(s, PF_SK_WIRE);
}

void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_item	*si;

	si = TAILQ_FIRST(&s->key[idx]->states);
	while (si && si->s != s)
		si = TAILQ_NEXT(si, entry);

	if (si) {
		TAILQ_REMOVE(&s->key[idx]->states, si, entry);
		pool_put(&pf_state_item_pl, si);
	}

	if (TAILQ_EMPTY(&s->key[idx]->states)) {
		RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]);
		if (s->key[idx]->reverse)
			s->key[idx]->reverse->reverse = NULL;
		if (s->key[idx]->inp)
			s->key[idx]->inp->inp_pf_sk = NULL;
		pool_put(&pf_state_key_pl, s->key[idx]);
	}
	s->key[idx] = NULL;
}

struct pf_state_key *
pf_alloc_state_key(int pool_flags)
{
	struct pf_state_key	*sk;

	if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
		return (NULL);
	TAILQ_INIT(&sk->states);

	return (sk);
}

int
pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
    struct pf_state_key **skw, struct pf_state_key **sks,
    struct pf_state_key **skp, struct pf_state_key **nkp,
    struct pf_addr *saddr, struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport)
{
	KKASSERT((*skp == NULL && *nkp == NULL));

	if ((*skp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
		return (ENOMEM);

	PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
	(*skp)->port[pd->sidx] = sport;
	(*skp)->port[pd->didx] = dport;
	(*skp)->proto = pd->proto;
	(*skp)->af = pd->af;

	if (nr != NULL) {
		if ((*nkp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
			return (ENOMEM); /* caller must handle cleanup */

		/* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
		PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
		PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
		(*nkp)->port[0] = (*skp)->port[0];
		(*nkp)->port[1] = (*skp)->port[1];
		(*nkp)->proto = pd->proto;
		(*nkp)->af = pd->af;
	} else
		*nkp = *skp;

	if (pd->dir == PF_IN) {
		*skw = *skp;
		*sks = *nkp;
	} else {
		*sks = *skp;
		*skw = *nkp;
	}
	return (0);
}
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	s->kif = kif;

	if (skw == sks) {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE))
			return (-1);
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
	} else {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
			pool_put(&pf_state_key_pl, sks);
			return (-1);
		}
		if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
			pf_state_key_detach(s, PF_SK_WIRE);
			return (-1);
		}
	}

	if (s->id == 0 && s->creatorid == 0) {
		s->id = htobe64(pf_status.stateid++);
		s->creatorid = pf_status.hostid;
	}

	/*
	 * Calculate hash code for altq
	 */
	s->hash = crc32(s->key[PF_SK_WIRE], sizeof(*sks));

	if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf: state insert failed: "
			    "id: %016jx creatorid: %08x",
			    (uintmax_t)be64toh(s->id), ntohl(s->creatorid));
			if (s->sync_flags & PFSTATE_FROMSYNC)
				kprintf(" (from sync)");
			kprintf("\n");
		}
		pf_detach_state(s);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&state_list, s, entry_list);
	pf_status.fcounters[FCNT_STATE_INSERT]++;
	pf_status.states++;
	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
	pfsync_insert_state(s);
	return (0);
}

struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
}

struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
    struct mbuf *m)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
	    ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse)
		sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
	else {
		if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
		    (struct pf_state_key *)key)) == NULL)
			return (NULL);
		if (dir == PF_OUT && m->m_pkthdr.pf.statekey) {
			((struct pf_state_key *)
			    m->m_pkthdr.pf.statekey)->reverse = sk;
			sk->reverse = m->m_pkthdr.pf.statekey;
		}
	}

	if (dir == PF_OUT)
		m->m_pkthdr.pf.statekey = NULL;

	/* list is sorted, if-bound states before floating ones */
	TAILQ_FOREACH(si, &sk->states, entry)
		if ((si->s->kif == pfi_all || si->s->kif == kif) &&
		    sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
		    si->s->key[PF_SK_STACK]))
			return (si->s);

	return (NULL);
}
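/*
 * Editor's note (illustration, not part of the original source):
 * pf_find_state() above caches lookups across the forwarding path.
 * When a packet was matched inbound, its mbuf carries the inbound state
 * key in m_pkthdr.pf.statekey; on the outbound pass the ->reverse link
 * lets pf jump straight to the paired key instead of repeating the
 * RB-tree lookup.  The pairing is established lazily on the first
 * outbound lookup and torn down in pf_state_key_detach() when either
 * key goes away.
 */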
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si, *ret = NULL;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);

	if (sk != NULL) {
		TAILQ_FOREACH(si, &sk->states, entry)
			if (dir == PF_INOUT ||
			    (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
			    si->s->key[PF_SK_STACK]))) {
				if (more == NULL)
					return (si->s);

				if (ret)
					(*more)++;
				else
					ret = si;
			}
	}
	return (ret ? ret->s : NULL);
}

/* END state table stuff */

void
pf_purge_thread(void *v)
{
	int nloops = 0;
	int locked = 0;

	lwkt_gettoken(&pf_token);
	for (;;) {
		tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);

		lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);

		if (pf_end_threads) {
			pf_purge_expired_states(pf_status.states, 1);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes(1);
			pf_end_threads++;

			lockmgr(&pf_consistency_lock, LK_RELEASE);
			wakeup(pf_purge_thread);
			kthread_exit();
		}
		crit_enter();

		/* process a fraction of the state table every second */
		if (!pf_purge_expired_states(1 + (pf_status.states /
		    pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
			pf_purge_expired_states(1 + (pf_status.states /
			    pf_default_rule.timeout[PFTM_INTERVAL]), 1);
		}

		/* purge other expired types every PFTM_INTERVAL seconds */
		if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
			pf_purge_expired_fragments();
			if (!pf_purge_expired_src_nodes(locked)) {
				pf_purge_expired_src_nodes(1);
			}
			nloops = 0;
		}
		crit_exit();
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	}
	lwkt_reltoken(&pf_token);
}

u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_second);
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	KKASSERT(state->timeout != PFTM_UNLINKED);
	KKASSERT(state->timeout < PFTM_MAX);
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states_cur;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_second);
	}
	return (state->expire + timeout);
}
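/*
 * Editor's note (worked example, not part of the original source):
 * assuming the usual adaptive.start/adaptive.end defaults of 6000/12000
 * states and a nominal timeout of 86400 seconds, a table holding 9000
 * states scales the timeout above to
 * 86400 * (12000 - 9000) / (12000 - 6000) = 43200 seconds; at or beyond
 * 12000 states pf_state_expires() returns time_second, i.e. the state
 * becomes immediately eligible for purging.
 */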
int
pf_purge_expired_src_nodes(int waslocked)
{
	struct pf_src_node	*cur, *next;
	int			 locked = waslocked;

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

		if (cur->states <= 0 && cur->expire <= time_second) {
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				next = RB_NEXT(pf_src_tree,
				    &tree_src_tracking, cur);
				locked = 1;
			}
			if (cur->rule.ptr != NULL) {
				cur->rule.ptr->src_nodes--;
				if (cur->rule.ptr->states_cur <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0)
					pf_rm_rule(NULL, cur->rule.ptr);
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
			pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			pf_status.src_nodes--;
			pool_put(&pf_src_tree_pl, cur);
		}
	}

	if (locked && !waslocked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}

void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			--s->src_node->conn;
		if (--s->src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_second + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_second + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/* callers should be at crit_enter() */
void
pf_unlink_state(struct pf_state *cur)
{
	if (cur->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
		    &cur->key[PF_SK_WIRE]->addr[1],
		    &cur->key[PF_SK_WIRE]->addr[0],
		    cur->key[PF_SK_WIRE]->port[1],
		    cur->key[PF_SK_WIRE]->port[0],
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}
	RB_REMOVE(pf_state_tree_id, &tree_id, cur);
	if (cur->creatorid == pf_status.hostid)
		pfsync_delete_state(cur);
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur);
}
static struct pf_state *purge_cur;

/*
 * callers should be at crit_enter() and hold the
 * write_lock on pf_consistency_lock
 */
void
pf_free_state(struct pf_state *cur)
{
	if (pfsyncif != NULL &&
	    (pfsyncif->sc_bulk_send_next == cur ||
	    pfsyncif->sc_bulk_terminator == cur))
		return;
	KKASSERT(cur->timeout == PFTM_UNLINKED);
	if (--cur->rule.ptr->states_cur <= 0 &&
	    cur->rule.ptr->src_nodes <= 0)
		pf_rm_rule(NULL, cur->rule.ptr);
	if (cur->nat_rule.ptr != NULL)
		if (--cur->nat_rule.ptr->states_cur <= 0 &&
		    cur->nat_rule.ptr->src_nodes <= 0)
			pf_rm_rule(NULL, cur->nat_rule.ptr);
	if (cur->anchor.ptr != NULL)
		if (--cur->anchor.ptr->states_cur <= 0)
			pf_rm_rule(NULL, cur->anchor.ptr);
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);

	/*
	 * We may be freeing pf_purge_expired_states()'s saved scan entry,
	 * adjust it if necessary.
	 */
	if (purge_cur == cur) {
		kprintf("PURGE CONFLICT\n");
		purge_cur = TAILQ_NEXT(purge_cur, entry_list);
	}
	TAILQ_REMOVE(&state_list, cur, entry_list);
	if (cur->tag)
		pf_tag_unref(cur->tag);
	pool_put(&pf_state_pl, cur);
	pf_status.fcounters[FCNT_STATE_REMOVALS]++;
	pf_status.states--;
}

int
pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
{
	struct pf_state		*cur;
	int			 locked = waslocked;

	while (maxcheck--) {
		/*
		 * Wrap to start of list when we hit the end
		 */
		cur = purge_cur;
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list);
			if (cur == NULL)
				break;	/* list empty */
		}

		/*
		 * Setup next (purge_cur) while we process this one.  If
		 * we block and something else deletes purge_cur,
		 * pf_free_state() will adjust it further ahead.
		 */
		purge_cur = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			/* free unlinked state */
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				locked = 1;
			}
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= time_second) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			if (!locked) {
				if (!lockmgr(&pf_consistency_lock,
				    LK_EXCLUSIVE))
					return (0);
				locked = 1;
			}
			pf_free_state(cur);
		}
	}

	if (locked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}
int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE)
		return (0);
	if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
		return (1);
	return (0);
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
		return;
	pfr_detach_table(aw->p.tbl);
	aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt = aw->p.tbl;

	if (aw->type != PF_ADDR_TABLE || kt == NULL)
		return;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			kprintf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart = 255, curend = 0,
		    maxstart = 0, maxend = 0;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				else
					curend = i;
			} else {
				if (curstart) {
					if ((curend - curstart) >
					    (maxend - maxstart)) {
						maxstart = curstart;
						maxend = curend;
						curstart = 255;
					}
				}
			}
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (maxend != 7) {
					if (i == maxstart)
						kprintf(":");
				} else {
					if (i == maxend)
						kprintf(":");
				}
			} else {
				b = ntohs(addr->addr16[i]);
				kprintf("%x", b);
				if (i < 7)
					kprintf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			kprintf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}
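/*
 * Editor's note (illustration, not part of the original source): the
 * IPv6 loop above implements "::" compression by tracking the longest
 * run of zero 16-bit groups, so fe80:0:0:0:0:0:0:1 prints as fe80::1.
 * Because a trailing ":port" would be ambiguous next to the colons, the
 * port is printed in brackets, e.g. fe80::1[22], rather than in the
 * IPv4 style 192.0.2.1:22.
 */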
void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_TCP:
		kprintf("TCP ");
		break;
	case IPPROTO_UDP:
		kprintf("UDP ");
		break;
	case IPPROTO_ICMP:
		kprintf("ICMP ");
		break;
	case IPPROTO_ICMPV6:
		kprintf("ICMPV6 ");
		break;
	default:
		kprintf("%u ", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		kprintf(" in");
		break;
	case PF_OUT:
		kprintf(" out");
		break;
	}
	if (skw) {
		kprintf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		kprintf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		kprintf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			kprintf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			kprintf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			kprintf("]");
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			kprintf("]");
		}
		kprintf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		kprintf(" ");
	if (f & TH_FIN)
		kprintf("F");
	if (f & TH_SYN)
		kprintf("S");
	if (f & TH_RST)
		kprintf("R");
	if (f & TH_PUSH)
		kprintf("P");
	if (f & TH_ACK)
		kprintf("A");
	if (f & TH_URG)
		kprintf("U");
	if (f & TH_ECE)
		kprintf("E");
	if (f & TH_CWR)
		kprintf("W");
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
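/*
 * Editor's note (illustration, not part of the original source): skip
 * steps are pf's classic ruleset-evaluation shortcut.  For each of the
 * PF_SKIP_* criteria, every rule points at the next rule that differs
 * in that criterion, so when e.g. the interface check fails, the
 * evaluator follows skip[PF_SKIP_IFP] past the whole run of rules bound
 * to the same interface instead of testing them one by one.  Given
 * hypothetical rules 1-3 on fxp0 and rules 4-5 on fxp1, a packet on
 * fxp1 fails the interface test at rule 1 and hops directly to rule 4.
 */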
int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	case PF_ADDR_RTLABEL:
		return (aw1->v.rtlabel != aw2->v.rtlabel);
	default:
		kprintf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
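/*
 * Editor's note (worked example, not part of the original source):
 * pf_cksum_fixup() is the RFC 1624 incremental update: add the old
 * 16-bit word, subtract the new one, and fold the carry back in, so the
 * header never has to be re-checksummed in full.  E.g. rewriting a port
 * from 0x0050 to 0x1234 against a current checksum of 0xabcd computes
 * l = 0xabcd + 0x0050 - 0x1234 = 0x99e9, which has no carry to fold and
 * becomes the new checksum.  The udp flag preserves the UDP convention
 * that an all-zero checksum means "no checksum" and that a computed
 * zero must be transmitted as 0xFFFF.
 */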
void
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*p = pn;
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u),
		    po, pn, u);
		break;
#endif /* INET6 */
	}
}

/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

#ifdef INET6
void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */

void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc = 0;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct raw_sackblock sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(&sack.rblk_start,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.rblk_end,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, opts);
	return (copyback);
}
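/*
 * Editor's note (worked example, not part of the original source): when
 * a state modulates sequence numbers by dst->seqdiff, the SACK blocks
 * echoed by the peer must be shifted by the same amount or the sender
 * would discard them.  With seqdiff = 1000, a SACK block covering
 * 5000-6000 on the wire is rewritten above to 4000-5000 before the
 * stack sees it, and th_sum is patched incrementally via pf_change_a().
 */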
void
pf_send_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
{
	struct mbuf	*m;
	int		 len = 0, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th = NULL;
	char		*opt;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	}

	/*
	 * Create outgoing mbuf.
	 *
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	m = m_gethdr(MB_DONTWAIT, MT_HEADER);
	if (m == NULL) {
		return;
	}
	if (tag)
		m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m->m_pkthdr.pf.flags = 0;
	m->m_pkthdr.pf.tag = rtag;
	/* XXX Recheck when upgrading to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;
	if (r != NULL && r->rtableid >= 0)
		m->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r != NULL && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = af;
		m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = tlen;
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		mss = htons(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_len = len;
		h->ip_off = path_mtu_discovery ? IP_DF : 0;
		h->ip_ttl = ttl ? ttl : ip_defttl;
		h->ip_sum = 0;
		if (eh == NULL) {
			lwkt_reltoken(&pf_token);
			ip_output(m, NULL, NULL, 0, NULL, NULL);
			lwkt_gettoken(&pf_token);
		} else {
			struct route		 ro;
			struct rtentry		 rt;
			struct ether_header	*e = (void *)ro.ro_dst.sa_data;

			if (ifp == NULL) {
				m_freem(m);
				return;
			}
			rt.rt_ifp = ifp;
			ro.ro_rt = &rt;
			ro.ro_dst.sa_len = sizeof(ro.ro_dst);
			ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
			bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
			bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
			e->ether_type = eh->ether_type;
			/* XXX_IMPORT: later */
			lwkt_reltoken(&pf_token);
			ip_output(m, (void *)NULL, &ro, 0,
			    (void *)NULL, (void *)NULL);
			lwkt_gettoken(&pf_token);
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		lwkt_reltoken(&pf_token);
		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
		lwkt_gettoken(&pf_token);
		break;
#endif /* INET6 */
	}
}
void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct mbuf	*m0;

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
		return;

	m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m0->m_pkthdr.pf.flags = 0;
	/* XXX Recheck when upgrading to > 4.4 */
	m0->m_pkthdr.pf.statekey = NULL;

	if (r->rtableid >= 0)
		m0->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r->qid) {
		/* tag the copy we are about to send, not the original */
		m0->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m0->m_pkthdr.pf.qid = r->qid;
		m0->m_pkthdr.pf.ecn_af = af;
		m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
#endif /* INET6 */
	}
}

/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal.  If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		     (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		     (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		     (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		     (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}
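/*
 * Editor's note (worked example, not part of the original source): for
 * "192.0.2.0/24" the mask bytes are ff ff ff 00, so a = 192.0.2.5 and
 * b = 192.0.2.0 both mask to 192.0.2.0 and match; with n != 0 (the
 * pf.conf "!" negation) the same comparison returns the negated result.
 */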
/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))
			return (0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		int	i;

		/* check a >= b */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
				break;
			else if (a->addr32[i] < b->addr32[i])
				return (0);
		/* check a <= e */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
				break;
			else if (a->addr32[i] > e->addr32[i])
				return (0);
		break;
	}
#endif /* INET6 */
	}
	return (1);
}

int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return ((p > a1) && (p < a2));
	case PF_OP_XRG:
		return ((p < a1) || (p > a2));
	case PF_OP_RRG:
		return ((p >= a1) && (p <= a2));
	case PF_OP_EQ:
		return (p == a1);
	case PF_OP_NE:
		return (p != a1);
	case PF_OP_LT:
		return (p < a1);
	case PF_OP_LE:
		return (p <= a1);
	case PF_OP_GT:
		return (p > a1);
	case PF_OP_GE:
		return (p >= a1);
	}
	return (0); /* never reached */
}
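/*
 * Editor's note (illustration, not part of the original source): the
 * operators map 1:1 to pf.conf port syntax; PF_OP_RRG is
 * "port 1000:2000" (inclusive range), PF_OP_IRG "port 1000 >< 2000"
 * (exclusive) and PF_OP_XRG "port 1000 <> 2000" (outside the range).
 * So pf_match(PF_OP_RRG, 1000, 2000, 1500) returns 1 while
 * pf_match(PF_OP_IRG, 1000, 2000, 1000) returns 0.
 */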
struct pf_rule **a, int *match) 2139 { 2140 struct pf_anchor_stackframe *f; 2141 int quick = 0; 2142 2143 do { 2144 if (*depth <= 0) 2145 break; 2146 f = pf_anchor_stack + *depth - 1; 2147 if (f->parent != NULL && f->child != NULL) { 2148 if (f->child->match || 2149 (match != NULL && *match)) { 2150 f->r->anchor->match = 1; 2151 if (match != NULL) *match = 0; 2152 } 2153 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child); 2154 if (f->child != NULL) { 2155 *rs = &f->child->ruleset; 2156 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2157 if (*r == NULL) 2158 continue; 2159 else 2160 break; 2161 } 2162 } 2163 (*depth)--; 2164 if (*depth == 0 && a != NULL) 2165 *a = NULL; 2166 *rs = f->rs; 2167 if (f->r->anchor->match || (match != NULL && *match)) 2168 quick = f->r->quick; 2169 *r = TAILQ_NEXT(f->r, entries); 2170 } while (*r == NULL); 2171 2172 return (quick); 2173 } 2174 2175 #ifdef INET6 2176 void 2177 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, 2178 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) 2179 { 2180 switch (af) { 2181 #ifdef INET 2182 case AF_INET: 2183 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2184 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2185 break; 2186 #endif /* INET */ 2187 case AF_INET6: 2188 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2189 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2190 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | 2191 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); 2192 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | 2193 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); 2194 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | 2195 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); 2196 break; 2197 } 2198 } 2199 2200 void 2201 pf_addr_inc(struct pf_addr *addr, sa_family_t af) 2202 { 2203 switch (af) { 2204 #ifdef INET 2205 case AF_INET: 2206 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); 2207 break; 2208 #endif /* INET */ 2209 case AF_INET6: 2210 if (addr->addr32[3] == 0xffffffff) { 2211 addr->addr32[3] = 0; 2212 if (addr->addr32[2] == 0xffffffff) { 2213 addr->addr32[2] = 0; 2214 if (addr->addr32[1] == 0xffffffff) { 2215 addr->addr32[1] = 0; 2216 addr->addr32[0] = 2217 htonl(ntohl(addr->addr32[0]) + 1); 2218 } else 2219 addr->addr32[1] = 2220 htonl(ntohl(addr->addr32[1]) + 1); 2221 } else 2222 addr->addr32[2] = 2223 htonl(ntohl(addr->addr32[2]) + 1); 2224 } else 2225 addr->addr32[3] = 2226 htonl(ntohl(addr->addr32[3]) + 1); 2227 break; 2228 } 2229 } 2230 #endif /* INET6 */ 2231 2232 #define mix(a,b,c) \ 2233 do { \ 2234 a -= b; a -= c; a ^= (c >> 13); \ 2235 b -= c; b -= a; b ^= (a << 8); \ 2236 c -= a; c -= b; c ^= (b >> 13); \ 2237 a -= b; a -= c; a ^= (c >> 12); \ 2238 b -= c; b -= a; b ^= (a << 16); \ 2239 c -= a; c -= b; c ^= (b >> 5); \ 2240 a -= b; a -= c; a ^= (c >> 3); \ 2241 b -= c; b -= a; b ^= (a << 10); \ 2242 c -= a; c -= b; c ^= (b >> 15); \ 2243 } while (0) 2244 2245 /* 2246 * hash function based on bridge_hash in if_bridge.c 2247 */ 2248 void 2249 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash, 2250 struct pf_poolhashkey *key, sa_family_t af) 2251 { 2252 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0]; 2253 2254 switch (af) { 2255 #ifdef INET 2256 case AF_INET: 2257 a += inaddr->addr32[0]; 2258 b += key->key32[1]; 2259 mix(a, b, c); 2260 hash->addr32[0] = c + key->key32[2]; 2261 break; 2262 #endif /* INET */ 2263 #ifdef INET6 2264 case AF_INET6: 2265 a += inaddr->addr32[0]; 2266 b +=
inaddr->addr32[2]; 2267 mix(a, b, c); 2268 hash->addr32[0] = c; 2269 a += inaddr->addr32[1]; 2270 b += inaddr->addr32[3]; 2271 c += key->key32[1]; 2272 mix(a, b, c); 2273 hash->addr32[1] = c; 2274 a += inaddr->addr32[2]; 2275 b += inaddr->addr32[1]; 2276 c += key->key32[2]; 2277 mix(a, b, c); 2278 hash->addr32[2] = c; 2279 a += inaddr->addr32[3]; 2280 b += inaddr->addr32[0]; 2281 c += key->key32[3]; 2282 mix(a, b, c); 2283 hash->addr32[3] = c; 2284 break; 2285 #endif /* INET6 */ 2286 } 2287 } 2288 2289 int 2290 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, 2291 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn) 2292 { 2293 unsigned char hash[16]; 2294 struct pf_pool *rpool = &r->rpool; 2295 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr; 2296 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask; 2297 struct pf_pooladdr *acur = rpool->cur; 2298 struct pf_src_node k; 2299 2300 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR && 2301 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2302 k.af = af; 2303 PF_ACPY(&k.addr, saddr, af); 2304 if (r->rule_flag & PFRULE_RULESRCTRACK || 2305 r->rpool.opts & PF_POOL_STICKYADDR) 2306 k.rule.ptr = r; 2307 else 2308 k.rule.ptr = NULL; 2309 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; 2310 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k); 2311 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) { 2312 PF_ACPY(naddr, &(*sn)->raddr, af); 2313 if (pf_status.debug >= PF_DEBUG_MISC) { 2314 kprintf("pf_map_addr: src tracking maps "); 2315 pf_print_host(&k.addr, 0, af); 2316 kprintf(" to "); 2317 pf_print_host(naddr, 0, af); 2318 kprintf("\n"); 2319 } 2320 return (0); 2321 } 2322 } 2323 2324 if (rpool->cur->addr.type == PF_ADDR_NOROUTE) 2325 return (1); 2326 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2327 switch (af) { 2328 #ifdef INET 2329 case AF_INET: 2330 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 && 2331 (rpool->opts & PF_POOL_TYPEMASK) != 2332 PF_POOL_ROUNDROBIN) 2333 return (1); 2334 raddr = &rpool->cur->addr.p.dyn->pfid_addr4; 2335 rmask = &rpool->cur->addr.p.dyn->pfid_mask4; 2336 break; 2337 #endif /* INET */ 2338 #ifdef INET6 2339 case AF_INET6: 2340 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 && 2341 (rpool->opts & PF_POOL_TYPEMASK) != 2342 PF_POOL_ROUNDROBIN) 2343 return (1); 2344 raddr = &rpool->cur->addr.p.dyn->pfid_addr6; 2345 rmask = &rpool->cur->addr.p.dyn->pfid_mask6; 2346 break; 2347 #endif /* INET6 */ 2348 } 2349 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2350 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) 2351 return (1); /* unsupported */ 2352 } else { 2353 raddr = &rpool->cur->addr.v.a.addr; 2354 rmask = &rpool->cur->addr.v.a.mask; 2355 } 2356 2357 switch (rpool->opts & PF_POOL_TYPEMASK) { 2358 case PF_POOL_NONE: 2359 PF_ACPY(naddr, raddr, af); 2360 break; 2361 case PF_POOL_BITMASK: 2362 PF_POOLMASK(naddr, raddr, rmask, saddr, af); 2363 break; 2364 case PF_POOL_RANDOM: 2365 if (init_addr != NULL && PF_AZERO(init_addr, af)) { 2366 switch (af) { 2367 #ifdef INET 2368 case AF_INET: 2369 rpool->counter.addr32[0] = htonl(karc4random()); 2370 break; 2371 #endif /* INET */ 2372 #ifdef INET6 2373 case AF_INET6: 2374 if (rmask->addr32[3] != 0xffffffff) 2375 rpool->counter.addr32[3] = 2376 htonl(karc4random()); 2377 else 2378 break; 2379 if (rmask->addr32[2] != 0xffffffff) 2380 rpool->counter.addr32[2] = 2381 htonl(karc4random()); 2382 else 2383 break; 2384 if (rmask->addr32[1] != 0xffffffff) 2385 rpool->counter.addr32[1] = 2386 htonl(karc4random()); 2387 else 2388 
break; 2389 if (rmask->addr32[0] != 0xffffffff) 2390 rpool->counter.addr32[0] = 2391 htonl(karc4random()); 2392 break; 2393 #endif /* INET6 */ 2394 } 2395 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af); 2396 PF_ACPY(init_addr, naddr, af); 2397 2398 } else { 2399 PF_AINC(&rpool->counter, af); 2400 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af); 2401 } 2402 break; 2403 case PF_POOL_SRCHASH: 2404 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af); 2405 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af); 2406 break; 2407 case PF_POOL_ROUNDROBIN: 2408 if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2409 if (!pfr_pool_get(rpool->cur->addr.p.tbl, 2410 &rpool->tblidx, &rpool->counter, 2411 &raddr, &rmask, af)) 2412 goto get_addr; 2413 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2414 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, 2415 &rpool->tblidx, &rpool->counter, 2416 &raddr, &rmask, af)) 2417 goto get_addr; 2418 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af)) 2419 goto get_addr; 2420 2421 try_next: 2422 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL) 2423 rpool->cur = TAILQ_FIRST(&rpool->list); 2424 if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2425 rpool->tblidx = -1; 2426 if (pfr_pool_get(rpool->cur->addr.p.tbl, 2427 &rpool->tblidx, &rpool->counter, 2428 &raddr, &rmask, af)) { 2429 /* table contains no address of type 'af' */ 2430 if (rpool->cur != acur) 2431 goto try_next; 2432 return (1); 2433 } 2434 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2435 rpool->tblidx = -1; 2436 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, 2437 &rpool->tblidx, &rpool->counter, 2438 &raddr, &rmask, af)) { 2439 /* table contains no address of type 'af' */ 2440 if (rpool->cur != acur) 2441 goto try_next; 2442 return (1); 2443 } 2444 } else { 2445 raddr = &rpool->cur->addr.v.a.addr; 2446 rmask = &rpool->cur->addr.v.a.mask; 2447 PF_ACPY(&rpool->counter, raddr, af); 2448 } 2449 2450 get_addr: 2451 PF_ACPY(naddr, &rpool->counter, af); 2452 if (init_addr != NULL && PF_AZERO(init_addr, af)) 2453 PF_ACPY(init_addr, naddr, af); 2454 PF_AINC(&rpool->counter, af); 2455 break; 2456 } 2457 if (*sn != NULL) 2458 PF_ACPY(&(*sn)->raddr, naddr, af); 2459 2460 if (pf_status.debug >= PF_DEBUG_MISC && 2461 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2462 kprintf("pf_map_addr: selected address "); 2463 pf_print_host(naddr, 0, af); 2464 kprintf("\n"); 2465 } 2466 2467 return (0); 2468 } 2469 2470 int 2471 pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r, 2472 struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport, 2473 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high, 2474 struct pf_src_node **sn) 2475 { 2476 struct pf_state_key_cmp key; 2477 struct pf_addr init_addr; 2478 u_int16_t cut; 2479 2480 bzero(&init_addr, sizeof(init_addr)); 2481 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2482 return (1); 2483 2484 if (proto == IPPROTO_ICMP) { 2485 low = 1; 2486 high = 65535; 2487 } 2488 2489 do { 2490 key.af = af; 2491 key.proto = proto; 2492 PF_ACPY(&key.addr[1], daddr, key.af); 2493 PF_ACPY(&key.addr[0], naddr, key.af); 2494 key.port[1] = dport; 2495 2496 /* 2497 * Port search: start at a random point and step through the range, 2498 * similar to the port loop in in_pcbbind. 2499 */ 2500 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP || 2501 proto == IPPROTO_ICMP)) { 2502 key.port[0] = dport; 2503 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) 2504 return (0); 2505 } else if (low == 0 && high == 0) { 2506 key.port[0] =
*nport; 2507 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) 2508 return (0); 2509 } else if (low == high) { 2510 key.port[0] = htons(low); 2511 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2512 *nport = htons(low); 2513 return (0); 2514 } 2515 } else { 2516 u_int16_t tmp; 2517 2518 if (low > high) { 2519 tmp = low; 2520 low = high; 2521 high = tmp; 2522 } 2523 /* low < high */ 2524 cut = htonl(karc4random()) % (1 + high - low) + low; 2525 /* low <= cut <= high */ 2526 for (tmp = cut; tmp <= high; ++(tmp)) { 2527 key.port[0] = htons(tmp); 2528 if (pf_find_state_all(&key, PF_IN, NULL) == 2529 NULL && !in_baddynamic(tmp, proto)) { 2530 *nport = htons(tmp); 2531 return (0); 2532 } 2533 } 2534 for (tmp = cut - 1; tmp >= low; --(tmp)) { 2535 key.port[0] = htons(tmp); 2536 if (pf_find_state_all(&key, PF_IN, NULL) == 2537 NULL && !in_baddynamic(tmp, proto)) { 2538 *nport = htons(tmp); 2539 return (0); 2540 } 2541 } 2542 } 2543 2544 switch (r->rpool.opts & PF_POOL_TYPEMASK) { 2545 case PF_POOL_RANDOM: 2546 case PF_POOL_ROUNDROBIN: 2547 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2548 return (1); 2549 break; 2550 case PF_POOL_NONE: 2551 case PF_POOL_SRCHASH: 2552 case PF_POOL_BITMASK: 2553 default: 2554 return (1); 2555 } 2556 } while (! PF_AEQ(&init_addr, naddr, af) ); 2557 return (1); /* none available */ 2558 } 2559 2560 struct pf_rule * 2561 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off, 2562 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport, 2563 struct pf_addr *daddr, u_int16_t dport, int rs_num) 2564 { 2565 struct pf_rule *r, *rm = NULL; 2566 struct pf_ruleset *ruleset = NULL; 2567 int tag = -1; 2568 int rtableid = -1; 2569 int asd = 0; 2570 2571 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr); 2572 while (r && rm == NULL) { 2573 struct pf_rule_addr *src = NULL, *dst = NULL; 2574 struct pf_addr_wrap *xdst = NULL; 2575 2576 if (r->action == PF_BINAT && direction == PF_IN) { 2577 src = &r->dst; 2578 if (r->rpool.cur != NULL) 2579 xdst = &r->rpool.cur->addr; 2580 } else { 2581 src = &r->src; 2582 dst = &r->dst; 2583 } 2584 2585 r->evaluations++; 2586 if (pfi_kif_match(r->kif, kif) == r->ifnot) 2587 r = r->skip[PF_SKIP_IFP].ptr; 2588 else if (r->direction && r->direction != direction) 2589 r = r->skip[PF_SKIP_DIR].ptr; 2590 else if (r->af && r->af != pd->af) 2591 r = r->skip[PF_SKIP_AF].ptr; 2592 else if (r->proto && r->proto != pd->proto) 2593 r = r->skip[PF_SKIP_PROTO].ptr; 2594 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af, 2595 src->neg, kif)) 2596 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR : 2597 PF_SKIP_DST_ADDR].ptr; 2598 else if (src->port_op && !pf_match_port(src->port_op, 2599 src->port[0], src->port[1], sport)) 2600 r = r->skip[src == &r->src ? 
PF_SKIP_SRC_PORT : 2601 PF_SKIP_DST_PORT].ptr; 2602 else if (dst != NULL && 2603 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL)) 2604 r = r->skip[PF_SKIP_DST_ADDR].ptr; 2605 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af, 2606 0, NULL)) 2607 r = TAILQ_NEXT(r, entries); 2608 else if (dst != NULL && dst->port_op && 2609 !pf_match_port(dst->port_op, dst->port[0], 2610 dst->port[1], dport)) 2611 r = r->skip[PF_SKIP_DST_PORT].ptr; 2612 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 2613 r = TAILQ_NEXT(r, entries); 2614 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto != 2615 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m, 2616 off, pd->hdr.tcp), r->os_fingerprint))) 2617 r = TAILQ_NEXT(r, entries); 2618 else { 2619 if (r->tag) 2620 tag = r->tag; 2621 if (r->rtableid >= 0) 2622 rtableid = r->rtableid; 2623 if (r->anchor == NULL) { 2624 rm = r; 2625 } else 2626 pf_step_into_anchor(&asd, &ruleset, rs_num, 2627 &r, NULL, NULL); 2628 } 2629 if (r == NULL) 2630 pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r, 2631 NULL, NULL); 2632 } 2633 if (pf_tag_packet(m, tag, rtableid)) 2634 return (NULL); 2635 if (rm != NULL && (rm->action == PF_NONAT || 2636 rm->action == PF_NORDR || rm->action == PF_NOBINAT)) 2637 return (NULL); 2638 return (rm); 2639 } 2640 2641 struct pf_rule * 2642 pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction, 2643 struct pfi_kif *kif, struct pf_src_node **sn, 2644 struct pf_state_key **skw, struct pf_state_key **sks, 2645 struct pf_state_key **skp, struct pf_state_key **nkp, 2646 struct pf_addr *saddr, struct pf_addr *daddr, 2647 u_int16_t sport, u_int16_t dport) 2648 { 2649 struct pf_rule *r = NULL; 2650 2651 2652 if (direction == PF_OUT) { 2653 r = pf_match_translation(pd, m, off, direction, kif, saddr, 2654 sport, daddr, dport, PF_RULESET_BINAT); 2655 if (r == NULL) 2656 r = pf_match_translation(pd, m, off, direction, kif, 2657 saddr, sport, daddr, dport, PF_RULESET_NAT); 2658 } else { 2659 r = pf_match_translation(pd, m, off, direction, kif, saddr, 2660 sport, daddr, dport, PF_RULESET_RDR); 2661 if (r == NULL) 2662 r = pf_match_translation(pd, m, off, direction, kif, 2663 saddr, sport, daddr, dport, PF_RULESET_BINAT); 2664 } 2665 2666 if (r != NULL) { 2667 struct pf_addr *naddr; 2668 u_int16_t *nport; 2669 2670 if (pf_state_key_setup(pd, r, skw, sks, skp, nkp, 2671 saddr, daddr, sport, dport)) 2672 return r; 2673 2674 /* XXX We only modify one side for now. */ 2675 naddr = &(*nkp)->addr[1]; 2676 nport = &(*nkp)->port[1]; 2677 2678 /* 2679 * NOTE: Currently all translations will clear 2680 * BRIDGE_MBUF_TAGGED, telling the bridge to 2681 * ignore the original input encapsulation. 
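* (Presumably because the encapsulation recorded at input no longer
* matches the translated addresses, so the bridge must not reuse it.)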
2682 */ 2683 switch (r->action) { 2684 case PF_NONAT: 2685 case PF_NOBINAT: 2686 case PF_NORDR: 2687 return (NULL); 2688 case PF_NAT: 2689 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2690 if (pf_get_sport(pd->af, pd->proto, r, saddr, 2691 daddr, dport, naddr, nport, r->rpool.proxy_port[0], 2692 r->rpool.proxy_port[1], sn)) { 2693 DPFPRINTF(PF_DEBUG_MISC, 2694 ("pf: NAT proxy port allocation " 2695 "(%u-%u) failed\n", 2696 r->rpool.proxy_port[0], 2697 r->rpool.proxy_port[1])); 2698 return (NULL); 2699 } 2700 break; 2701 case PF_BINAT: 2702 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2703 switch (direction) { 2704 case PF_OUT: 2705 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){ 2706 switch (pd->af) { 2707 #ifdef INET 2708 case AF_INET: 2709 if (r->rpool.cur->addr.p.dyn-> 2710 pfid_acnt4 < 1) 2711 return (NULL); 2712 PF_POOLMASK(naddr, 2713 &r->rpool.cur->addr.p.dyn-> 2714 pfid_addr4, 2715 &r->rpool.cur->addr.p.dyn-> 2716 pfid_mask4, 2717 saddr, AF_INET); 2718 break; 2719 #endif /* INET */ 2720 #ifdef INET6 2721 case AF_INET6: 2722 if (r->rpool.cur->addr.p.dyn-> 2723 pfid_acnt6 < 1) 2724 return (NULL); 2725 PF_POOLMASK(naddr, 2726 &r->rpool.cur->addr.p.dyn-> 2727 pfid_addr6, 2728 &r->rpool.cur->addr.p.dyn-> 2729 pfid_mask6, 2730 saddr, AF_INET6); 2731 break; 2732 #endif /* INET6 */ 2733 } 2734 } else 2735 PF_POOLMASK(naddr, 2736 &r->rpool.cur->addr.v.a.addr, 2737 &r->rpool.cur->addr.v.a.mask, 2738 saddr, pd->af); 2739 break; 2740 case PF_IN: 2741 if (r->src.addr.type == PF_ADDR_DYNIFTL) { 2742 switch (pd->af) { 2743 #ifdef INET 2744 case AF_INET: 2745 if (r->src.addr.p.dyn-> 2746 pfid_acnt4 < 1) 2747 return (NULL); 2748 PF_POOLMASK(naddr, 2749 &r->src.addr.p.dyn-> 2750 pfid_addr4, 2751 &r->src.addr.p.dyn-> 2752 pfid_mask4, 2753 daddr, AF_INET); 2754 break; 2755 #endif /* INET */ 2756 #ifdef INET6 2757 case AF_INET6: 2758 if (r->src.addr.p.dyn-> 2759 pfid_acnt6 < 1) 2760 return (NULL); 2761 PF_POOLMASK(naddr, 2762 &r->src.addr.p.dyn-> 2763 pfid_addr6, 2764 &r->src.addr.p.dyn-> 2765 pfid_mask6, 2766 daddr, AF_INET6); 2767 break; 2768 #endif /* INET6 */ 2769 } 2770 } else 2771 PF_POOLMASK(naddr, 2772 &r->src.addr.v.a.addr, 2773 &r->src.addr.v.a.mask, daddr, 2774 pd->af); 2775 break; 2776 } 2777 break; 2778 case PF_RDR: { 2779 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2780 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn)) 2781 return (NULL); 2782 if ((r->rpool.opts & PF_POOL_TYPEMASK) == 2783 PF_POOL_BITMASK) 2784 PF_POOLMASK(naddr, naddr, 2785 &r->rpool.cur->addr.v.a.mask, daddr, 2786 pd->af); 2787 2788 if (r->rpool.proxy_port[1]) { 2789 u_int32_t tmp_nport; 2790 2791 tmp_nport = ((ntohs(dport) - 2792 ntohs(r->dst.port[0])) % 2793 (r->rpool.proxy_port[1] - 2794 r->rpool.proxy_port[0] + 1)) + 2795 r->rpool.proxy_port[0]; 2796 2797 /* wrap around if necessary */ 2798 if (tmp_nport > 65535) 2799 tmp_nport -= 65535; 2800 *nport = htons((u_int16_t)tmp_nport); 2801 } else if (r->rpool.proxy_port[0]) 2802 *nport = htons(r->rpool.proxy_port[0]); 2803 break; 2804 } 2805 default: 2806 return (NULL); 2807 } 2808 } 2809 2810 return (r); 2811 } 2812 2813 #ifdef SMP 2814 struct netmsg_hashlookup { 2815 struct netmsg_base base; 2816 struct inpcb **nm_pinp; 2817 struct inpcbinfo *nm_pcbinfo; 2818 struct pf_addr *nm_saddr; 2819 struct pf_addr *nm_daddr; 2820 uint16_t nm_sport; 2821 uint16_t nm_dport; 2822 sa_family_t nm_af; 2823 }; 2824 2825 #ifdef PF_SOCKET_LOOKUP_DOMSG 2826 static void 2827 in_pcblookup_hash_handler(netmsg_t msg) 2828 { 2829 struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup 
*)msg; 2830 2831 if (rmsg->nm_af == AF_INET) 2832 *rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo, 2833 rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4, 2834 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 2835 #ifdef INET6 2836 else 2837 *rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo, 2838 &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6, 2839 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 2840 #endif /* INET6 */ 2841 lwkt_replymsg(&rmsg->base.lmsg, 0); 2842 } 2843 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 2844 2845 #endif /* SMP */ 2846 2847 int 2848 pf_socket_lookup(int direction, struct pf_pdesc *pd) 2849 { 2850 struct pf_addr *saddr, *daddr; 2851 u_int16_t sport, dport; 2852 struct inpcbinfo *pi; 2853 struct inpcb *inp; 2854 #ifdef SMP 2855 struct netmsg_hashlookup *msg = NULL; 2856 #ifdef PF_SOCKET_LOOKUP_DOMSG 2857 struct netmsg_hashlookup msg0; 2858 #endif 2859 #endif 2860 int pi_cpu = 0; 2861 2862 if (pd == NULL) 2863 return (-1); 2864 pd->lookup.uid = UID_MAX; 2865 pd->lookup.gid = GID_MAX; 2866 pd->lookup.pid = NO_PID; 2867 if (direction == PF_IN) { 2868 saddr = pd->src; 2869 daddr = pd->dst; 2870 } else { 2871 saddr = pd->dst; 2872 daddr = pd->src; 2873 } 2874 switch (pd->proto) { 2875 case IPPROTO_TCP: 2876 if (pd->hdr.tcp == NULL) 2877 return (-1); 2878 sport = pd->hdr.tcp->th_sport; 2879 dport = pd->hdr.tcp->th_dport; 2880 2881 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport); 2882 pi = &tcbinfo[pi_cpu]; 2883 #ifdef SMP 2884 /* 2885 * Our netstack runs lockless on MP systems 2886 * (only for TCP connections at the moment). 2887 * 2888 * As we are not allowed to read another CPU's tcbinfo, 2889 * we have to ask that CPU via remote call to search the 2890 * table for us. 2891 * 2892 * Prepare a msg iff data belongs to another CPU. 2893 */ 2894 if (pi_cpu != mycpu->gd_cpuid) { 2895 #ifdef PF_SOCKET_LOOKUP_DOMSG 2896 /* 2897 * NOTE: 2898 * 2899 * Following lwkt_domsg() is dangerous and could 2900 * lockup the network system, e.g. 2901 * 2902 * On 2 CPU system: 2903 * netisr0 domsg to netisr1 (due to lookup) 2904 * netisr1 domsg to netisr0 (due to lookup) 2905 * 2906 * We simply return -1 here, since we are probably 2907 * called before NAT, so the TCP packet should 2908 * already be on the correct CPU. 2909 */ 2910 msg = &msg0; 2911 netmsg_init(&msg->base, NULL, &curthread->td_msgport, 2912 0, in_pcblookup_hash_handler); 2913 msg->nm_pinp = &inp; 2914 msg->nm_pcbinfo = pi; 2915 msg->nm_saddr = saddr; 2916 msg->nm_sport = sport; 2917 msg->nm_daddr = daddr; 2918 msg->nm_dport = dport; 2919 msg->nm_af = pd->af; 2920 #else /* !PF_SOCKET_LOOKUP_DOMSG */ 2921 kprintf("pf_socket_lookup: tcp packet not on the " 2922 "correct cpu %d, cur cpu %d\n", 2923 pi_cpu, mycpuid); 2924 print_backtrace(-1); 2925 return -1; 2926 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 2927 } 2928 #endif /* SMP */ 2929 break; 2930 case IPPROTO_UDP: 2931 if (pd->hdr.udp == NULL) 2932 return (-1); 2933 sport = pd->hdr.udp->uh_sport; 2934 dport = pd->hdr.udp->uh_dport; 2935 pi = &udbinfo; 2936 break; 2937 default: 2938 return (-1); 2939 } 2940 if (direction != PF_IN) { 2941 u_int16_t p; 2942 2943 p = sport; 2944 sport = dport; 2945 dport = p; 2946 } 2947 switch (pd->af) { 2948 #ifdef INET6 2949 case AF_INET6: 2950 #ifdef SMP 2951 /* 2952 * Query other CPU, second part 2953 * 2954 * msg only gets initialized when: 2955 * 1) packet is TCP 2956 * 2) the info belongs to another CPU 2957 * 2958 * Use some switch/case magic to avoid code duplication. 
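* (When msg != NULL the AF_INET6 case falls through to AF_INET below,
* so the remote-CPU dispatch is written only once for both families.)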
2959 */ 2960 if (msg == NULL) 2961 #endif /* SMP */ 2962 { 2963 inp = in6_pcblookup_hash(pi, &saddr->v6, sport, 2964 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL); 2965 2966 if (inp == NULL) 2967 return (-1); 2968 break; 2969 } 2970 /* FALLTHROUGH if SMP and on other CPU */ 2971 #endif /* INET6 */ 2972 case AF_INET: 2973 #ifdef SMP 2974 if (msg != NULL) { 2975 lwkt_domsg(cpu_portfn(pi_cpu), 2976 &msg->base.lmsg, 0); 2977 } else 2978 #endif /* SMP */ 2979 { 2980 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4, 2981 dport, INPLOOKUP_WILDCARD, NULL); 2982 } 2983 if (inp == NULL) 2984 return (-1); 2985 break; 2986 2987 default: 2988 return (-1); 2989 } 2990 pd->lookup.uid = inp->inp_socket->so_cred->cr_uid; 2991 pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0]; 2992 return (1); 2993 } 2994 2995 u_int8_t 2996 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 2997 { 2998 int hlen; 2999 u_int8_t hdr[60]; 3000 u_int8_t *opt, optlen; 3001 u_int8_t wscale = 0; 3002 3003 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3004 if (hlen <= sizeof(struct tcphdr)) 3005 return (0); 3006 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3007 return (0); 3008 opt = hdr + sizeof(struct tcphdr); 3009 hlen -= sizeof(struct tcphdr); 3010 while (hlen >= 3) { 3011 switch (*opt) { 3012 case TCPOPT_EOL: 3013 case TCPOPT_NOP: 3014 ++opt; 3015 --hlen; 3016 break; 3017 case TCPOPT_WINDOW: 3018 wscale = opt[2]; 3019 if (wscale > TCP_MAX_WINSHIFT) 3020 wscale = TCP_MAX_WINSHIFT; 3021 wscale |= PF_WSCALE_FLAG; 3022 /* FALLTHROUGH */ 3023 default: 3024 optlen = opt[1]; 3025 if (optlen < 2) 3026 optlen = 2; 3027 hlen -= optlen; 3028 opt += optlen; 3029 break; 3030 } 3031 } 3032 return (wscale); 3033 } 3034 3035 u_int16_t 3036 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3037 { 3038 int hlen; 3039 u_int8_t hdr[60]; 3040 u_int8_t *opt, optlen; 3041 u_int16_t mss = tcp_mssdflt; 3042 3043 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3044 if (hlen <= sizeof(struct tcphdr)) 3045 return (0); 3046 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3047 return (0); 3048 opt = hdr + sizeof(struct tcphdr); 3049 hlen -= sizeof(struct tcphdr); 3050 while (hlen >= TCPOLEN_MAXSEG) { 3051 switch (*opt) { 3052 case TCPOPT_EOL: 3053 case TCPOPT_NOP: 3054 ++opt; 3055 --hlen; 3056 break; 3057 case TCPOPT_MAXSEG: 3058 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); 3059 /* FALLTHROUGH */ 3060 default: 3061 optlen = opt[1]; 3062 if (optlen < 2) 3063 optlen = 2; 3064 hlen -= optlen; 3065 opt += optlen; 3066 break; 3067 } 3068 } 3069 return (mss); 3070 } 3071 3072 u_int16_t 3073 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) 3074 { 3075 #ifdef INET 3076 struct sockaddr_in *dst; 3077 struct route ro; 3078 #endif /* INET */ 3079 #ifdef INET6 3080 struct sockaddr_in6 *dst6; 3081 struct route_in6 ro6; 3082 #endif /* INET6 */ 3083 struct rtentry *rt = NULL; 3084 int hlen = 0; 3085 u_int16_t mss = tcp_mssdflt; 3086 3087 switch (af) { 3088 #ifdef INET 3089 case AF_INET: 3090 hlen = sizeof(struct ip); 3091 bzero(&ro, sizeof(ro)); 3092 dst = (struct sockaddr_in *)&ro.ro_dst; 3093 dst->sin_family = AF_INET; 3094 dst->sin_len = sizeof(*dst); 3095 dst->sin_addr = addr->v4; 3096 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING)); 3097 rt = ro.ro_rt; 3098 break; 3099 #endif /* INET */ 3100 #ifdef INET6 3101 case AF_INET6: 3102 hlen = sizeof(struct ip6_hdr); 3103 bzero(&ro6, sizeof(ro6)); 3104 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst; 3105 dst6->sin6_family = AF_INET6; 3106 
dst6->sin6_len = sizeof(*dst6); 3107 dst6->sin6_addr = addr->v6; 3108 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING)); 3109 rt = ro6.ro_rt; 3110 break; 3111 #endif /* INET6 */ 3112 } 3113 3114 if (rt && rt->rt_ifp) { 3115 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr); 3116 mss = max(tcp_mssdflt, mss); 3117 RTFREE(rt); 3118 } 3119 mss = min(mss, offer); 3120 mss = max(mss, 64); /* sanity - at least max opt space */ 3121 return (mss); 3122 } 3123 3124 void 3125 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr) 3126 { 3127 struct pf_rule *r = s->rule.ptr; 3128 3129 s->rt_kif = NULL; 3130 if (!r->rt || r->rt == PF_FASTROUTE) 3131 return; 3132 switch (s->key[PF_SK_WIRE]->af) { 3133 #ifdef INET 3134 case AF_INET: 3135 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, 3136 &s->nat_src_node); 3137 s->rt_kif = r->rpool.cur->kif; 3138 break; 3139 #endif /* INET */ 3140 #ifdef INET6 3141 case AF_INET6: 3142 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, 3143 &s->nat_src_node); 3144 s->rt_kif = r->rpool.cur->kif; 3145 break; 3146 #endif /* INET6 */ 3147 } 3148 } 3149 3150 u_int32_t 3151 pf_tcp_iss(struct pf_pdesc *pd) 3152 { 3153 MD5_CTX ctx; 3154 u_int32_t digest[4]; 3155 3156 if (pf_tcp_secret_init == 0) { 3157 karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret)); 3158 MD5Init(&pf_tcp_secret_ctx); 3159 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret, 3160 sizeof(pf_tcp_secret)); 3161 pf_tcp_secret_init = 1; 3162 } 3163 ctx = pf_tcp_secret_ctx; 3164 3165 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short)); 3166 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short)); 3167 if (pd->af == AF_INET6) { 3168 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr)); 3169 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr)); 3170 } else { 3171 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr)); 3172 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr)); 3173 } 3174 MD5Final((u_char *)digest, &ctx); 3175 pf_tcp_iss_off += 4096; 3176 return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off); 3177 } 3178 3179 int 3180 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, 3181 struct pfi_kif *kif, struct mbuf *m, int off, void *h, 3182 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm, 3183 struct ifqueue *ifq, struct inpcb *inp) 3184 { 3185 struct pf_rule *nr = NULL; 3186 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 3187 sa_family_t af = pd->af; 3188 struct pf_rule *r, *a = NULL; 3189 struct pf_ruleset *ruleset = NULL; 3190 struct pf_src_node *nsn = NULL; 3191 struct tcphdr *th = pd->hdr.tcp; 3192 struct pf_state_key *skw = NULL, *sks = NULL; 3193 struct pf_state_key *sk = NULL, *nk = NULL; 3194 u_short reason; 3195 int rewrite = 0, hdrlen = 0; 3196 int tag = -1, rtableid = -1; 3197 int asd = 0; 3198 int match = 0; 3199 int state_icmp = 0; 3200 u_int16_t sport = 0, dport = 0; 3201 u_int16_t nport = 0, bport = 0; 3202 u_int16_t bproto_sum = 0, bip_sum = 0; 3203 u_int8_t icmptype = 0, icmpcode = 0; 3204 3205 3206 if (direction == PF_IN && pf_check_congestion(ifq)) { 3207 REASON_SET(&reason, PFRES_CONGEST); 3208 return (PF_DROP); 3209 } 3210 3211 if (inp != NULL) 3212 pd->lookup.done = pf_socket_lookup(direction, pd); 3213 else if (debug_pfugidhack) { 3214 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n")); 3215 pd->lookup.done = pf_socket_lookup(direction, pd); 3216 } 3217 3218 switch (pd->proto) { 3219 case IPPROTO_TCP: 3220 sport = th->th_sport; 3221 dport = th->th_dport; 3222 hdrlen = 
sizeof(*th); 3223 break; 3224 case IPPROTO_UDP: 3225 sport = pd->hdr.udp->uh_sport; 3226 dport = pd->hdr.udp->uh_dport; 3227 hdrlen = sizeof(*pd->hdr.udp); 3228 break; 3229 #ifdef INET 3230 case IPPROTO_ICMP: 3231 if (pd->af != AF_INET) 3232 break; 3233 sport = dport = pd->hdr.icmp->icmp_id; 3234 hdrlen = sizeof(*pd->hdr.icmp); 3235 icmptype = pd->hdr.icmp->icmp_type; 3236 icmpcode = pd->hdr.icmp->icmp_code; 3237 3238 if (icmptype == ICMP_UNREACH || 3239 icmptype == ICMP_SOURCEQUENCH || 3240 icmptype == ICMP_REDIRECT || 3241 icmptype == ICMP_TIMXCEED || 3242 icmptype == ICMP_PARAMPROB) 3243 state_icmp++; 3244 break; 3245 #endif /* INET */ 3246 #ifdef INET6 3247 case IPPROTO_ICMPV6: 3248 if (af != AF_INET6) 3249 break; 3250 sport = dport = pd->hdr.icmp6->icmp6_id; 3251 hdrlen = sizeof(*pd->hdr.icmp6); 3252 icmptype = pd->hdr.icmp6->icmp6_type; 3253 icmpcode = pd->hdr.icmp6->icmp6_code; 3254 3255 if (icmptype == ICMP6_DST_UNREACH || 3256 icmptype == ICMP6_PACKET_TOO_BIG || 3257 icmptype == ICMP6_TIME_EXCEEDED || 3258 icmptype == ICMP6_PARAM_PROB) 3259 state_icmp++; 3260 break; 3261 #endif /* INET6 */ 3262 default: 3263 sport = dport = hdrlen = 0; 3264 break; 3265 } 3266 3267 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3268 3269 bport = nport = sport; 3270 /* check packet for BINAT/NAT/RDR */ 3271 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, 3272 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) { 3273 if (nk == NULL || sk == NULL) { 3274 REASON_SET(&reason, PFRES_MEMORY); 3275 goto cleanup; 3276 } 3277 3278 if (pd->ip_sum) 3279 bip_sum = *pd->ip_sum; 3280 3281 switch (pd->proto) { 3282 case IPPROTO_TCP: 3283 bproto_sum = th->th_sum; 3284 pd->proto_sum = &th->th_sum; 3285 3286 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3287 nk->port[pd->sidx] != sport) { 3288 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 3289 &th->th_sum, &nk->addr[pd->sidx], 3290 nk->port[pd->sidx], 0, af); 3291 pd->sport = &th->th_sport; 3292 sport = th->th_sport; 3293 } 3294 3295 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3296 nk->port[pd->didx] != dport) { 3297 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 3298 &th->th_sum, &nk->addr[pd->didx], 3299 nk->port[pd->didx], 0, af); 3300 dport = th->th_dport; 3301 pd->dport = &th->th_dport; 3302 } 3303 rewrite++; 3304 break; 3305 case IPPROTO_UDP: 3306 bproto_sum = pd->hdr.udp->uh_sum; 3307 pd->proto_sum = &pd->hdr.udp->uh_sum; 3308 3309 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3310 nk->port[pd->sidx] != sport) { 3311 pf_change_ap(saddr, &pd->hdr.udp->uh_sport, 3312 pd->ip_sum, &pd->hdr.udp->uh_sum, 3313 &nk->addr[pd->sidx], 3314 nk->port[pd->sidx], 1, af); 3315 sport = pd->hdr.udp->uh_sport; 3316 pd->sport = &pd->hdr.udp->uh_sport; 3317 } 3318 3319 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3320 nk->port[pd->didx] != dport) { 3321 pf_change_ap(daddr, &pd->hdr.udp->uh_dport, 3322 pd->ip_sum, &pd->hdr.udp->uh_sum, 3323 &nk->addr[pd->didx], 3324 nk->port[pd->didx], 1, af); 3325 dport = pd->hdr.udp->uh_dport; 3326 pd->dport = &pd->hdr.udp->uh_dport; 3327 } 3328 rewrite++; 3329 break; 3330 #ifdef INET 3331 case IPPROTO_ICMP: 3332 nk->port[0] = nk->port[1]; 3333 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET)) 3334 pf_change_a(&saddr->v4.s_addr, pd->ip_sum, 3335 nk->addr[pd->sidx].v4.s_addr, 0); 3336 3337 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET)) 3338 pf_change_a(&daddr->v4.s_addr, pd->ip_sum, 3339 nk->addr[pd->didx].v4.s_addr, 0); 3340 3341 if (nk->port[1] != pd->hdr.icmp->icmp_id) { 3342 
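/* ICMP has no ports; the query id takes their place, so rewrite the id and patch icmp_cksum incrementally via pf_cksum_fixup(). */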
pd->hdr.icmp->icmp_cksum = pf_cksum_fixup( 3343 pd->hdr.icmp->icmp_cksum, sport, 3344 nk->port[1], 0); 3345 pd->hdr.icmp->icmp_id = nk->port[1]; 3346 pd->sport = &pd->hdr.icmp->icmp_id; 3347 } 3348 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 3349 break; 3350 #endif /* INET */ 3351 #ifdef INET6 3352 case IPPROTO_ICMPV6: 3353 nk->port[0] = nk->port[1]; 3354 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6)) 3355 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum, 3356 &nk->addr[pd->sidx], 0); 3357 3358 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6)) 3359 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum, 3360 &nk->addr[pd->didx], 0); 3361 rewrite++; 3362 break; 3363 #endif /* INET6 */ 3364 default: 3365 switch (af) { 3366 #ifdef INET 3367 case AF_INET: 3368 if (PF_ANEQ(saddr, 3369 &nk->addr[pd->sidx], AF_INET)) 3370 pf_change_a(&saddr->v4.s_addr, 3371 pd->ip_sum, 3372 nk->addr[pd->sidx].v4.s_addr, 0); 3373 3374 if (PF_ANEQ(daddr, 3375 &nk->addr[pd->didx], AF_INET)) 3376 pf_change_a(&daddr->v4.s_addr, 3377 pd->ip_sum, 3378 nk->addr[pd->didx].v4.s_addr, 0); 3379 break; 3380 #endif /* INET */ 3381 #ifdef INET6 3382 case AF_INET6: 3383 if (PF_ANEQ(saddr, 3384 &nk->addr[pd->sidx], AF_INET6)) 3385 PF_ACPY(saddr, &nk->addr[pd->sidx], af); 3386 3387 if (PF_ANEQ(daddr, 3388 &nk->addr[pd->didx], AF_INET6)) 3389 PF_ACPY(daddr, &nk->addr[pd->didx], af); 3390 break; 3391 #endif /* INET6 */ 3392 } 3393 break; 3394 } 3395 if (nr->natpass) 3396 r = NULL; 3397 pd->nat_rule = nr; 3398 } 3399 3400 while (r != NULL) { 3401 r->evaluations++; 3402 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3403 r = r->skip[PF_SKIP_IFP].ptr; 3404 else if (r->direction && r->direction != direction) 3405 r = r->skip[PF_SKIP_DIR].ptr; 3406 else if (r->af && r->af != af) 3407 r = r->skip[PF_SKIP_AF].ptr; 3408 else if (r->proto && r->proto != pd->proto) 3409 r = r->skip[PF_SKIP_PROTO].ptr; 3410 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, 3411 r->src.neg, kif)) 3412 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3413 /* tcp/udp only. port_op always 0 in other cases */ 3414 else if (r->src.port_op && !pf_match_port(r->src.port_op, 3415 r->src.port[0], r->src.port[1], sport)) 3416 r = r->skip[PF_SKIP_SRC_PORT].ptr; 3417 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, 3418 r->dst.neg, NULL)) 3419 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3420 /* tcp/udp only. port_op always 0 in other cases */ 3421 else if (r->dst.port_op && !pf_match_port(r->dst.port_op, 3422 r->dst.port[0], r->dst.port[1], dport)) 3423 r = r->skip[PF_SKIP_DST_PORT].ptr; 3424 /* icmp only. type always 0 in other cases */ 3425 else if (r->type && r->type != icmptype + 1) 3426 r = TAILQ_NEXT(r, entries); 3427 /* icmp only. code always 0 in other cases */ 3428 else if (r->code && r->code != icmpcode + 1) 3429 r = TAILQ_NEXT(r, entries); 3430 else if (r->tos && !(r->tos == pd->tos)) 3431 r = TAILQ_NEXT(r, entries); 3432 else if (r->rule_flag & PFRULE_FRAGMENT) 3433 r = TAILQ_NEXT(r, entries); 3434 else if (pd->proto == IPPROTO_TCP && 3435 (r->flagset & th->th_flags) != r->flags) 3436 r = TAILQ_NEXT(r, entries); 3437 /* tcp/udp only. uid.op always 0 in other cases */ 3438 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done = 3439 pf_socket_lookup(direction, pd), 1)) && 3440 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1], 3441 pd->lookup.uid)) 3442 r = TAILQ_NEXT(r, entries); 3443 /* tcp/udp only.
gid.op always 0 in other cases */ 3444 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done = 3445 pf_socket_lookup(direction, pd), 1)) && 3446 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], 3447 pd->lookup.gid)) 3448 r = TAILQ_NEXT(r, entries); 3449 else if (r->prob && 3450 r->prob <= karc4random()) 3451 r = TAILQ_NEXT(r, entries); 3452 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3453 r = TAILQ_NEXT(r, entries); 3454 else if (r->os_fingerprint != PF_OSFP_ANY && 3455 (pd->proto != IPPROTO_TCP || !pf_osfp_match( 3456 pf_osfp_fingerprint(pd, m, off, th), 3457 r->os_fingerprint))) 3458 r = TAILQ_NEXT(r, entries); 3459 else { 3460 if (r->tag) 3461 tag = r->tag; 3462 if (r->rtableid >= 0) 3463 rtableid = r->rtableid; 3464 if (r->anchor == NULL) { 3465 match = 1; 3466 *rm = r; 3467 *am = a; 3468 *rsm = ruleset; 3469 if ((*rm)->quick) 3470 break; 3471 r = TAILQ_NEXT(r, entries); 3472 } else 3473 pf_step_into_anchor(&asd, &ruleset, 3474 PF_RULESET_FILTER, &r, &a, &match); 3475 } 3476 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3477 PF_RULESET_FILTER, &r, &a, &match)) 3478 break; 3479 } 3480 r = *rm; 3481 a = *am; 3482 ruleset = *rsm; 3483 3484 REASON_SET(&reason, PFRES_MATCH); 3485 3486 if (r->log || (nr != NULL && nr->log)) { 3487 if (rewrite) 3488 m_copyback(m, off, hdrlen, pd->hdr.any); 3489 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr, 3490 a, ruleset, pd); 3491 } 3492 3493 if ((r->action == PF_DROP) && 3494 ((r->rule_flag & PFRULE_RETURNRST) || 3495 (r->rule_flag & PFRULE_RETURNICMP) || 3496 (r->rule_flag & PFRULE_RETURN))) { 3497 /* undo NAT changes, if they have taken place */ 3498 if (nr != NULL) { 3499 PF_ACPY(saddr, &sk->addr[pd->sidx], af); 3500 PF_ACPY(daddr, &sk->addr[pd->didx], af); 3501 if (pd->sport) 3502 *pd->sport = sk->port[pd->sidx]; 3503 if (pd->dport) 3504 *pd->dport = sk->port[pd->didx]; 3505 if (pd->proto_sum) 3506 *pd->proto_sum = bproto_sum; 3507 if (pd->ip_sum) 3508 *pd->ip_sum = bip_sum; 3509 m_copyback(m, off, hdrlen, pd->hdr.any); 3510 } 3511 if (pd->proto == IPPROTO_TCP && 3512 ((r->rule_flag & PFRULE_RETURNRST) || 3513 (r->rule_flag & PFRULE_RETURN)) && 3514 !(th->th_flags & TH_RST)) { 3515 u_int32_t ack = ntohl(th->th_seq) + pd->p_len; 3516 int len = 0; 3517 struct ip *h4; 3518 struct ip6_hdr *h6; 3519 3520 switch (af) { 3521 case AF_INET: 3522 h4 = mtod(m, struct ip *); 3523 len = h4->ip_len - off; 3524 break; 3525 #ifdef INET6 3526 case AF_INET6: 3527 h6 = mtod(m, struct ip6_hdr *); 3528 len = h6->ip6_plen - (off - sizeof(*h6)); 3529 break; 3530 #endif 3531 } 3532 3533 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af)) 3534 REASON_SET(&reason, PFRES_PROTCKSUM); 3535 else { 3536 if (th->th_flags & TH_SYN) 3537 ack++; 3538 if (th->th_flags & TH_FIN) 3539 ack++; 3540 pf_send_tcp(r, af, pd->dst, 3541 pd->src, th->th_dport, th->th_sport, 3542 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, 3543 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp); 3544 } 3545 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET && 3546 r->return_icmp) 3547 pf_send_icmp(m, r->return_icmp >> 8, 3548 r->return_icmp & 255, af, r); 3549 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && 3550 r->return_icmp6) 3551 pf_send_icmp(m, r->return_icmp6 >> 8, 3552 r->return_icmp6 & 255, af, r); 3553 } 3554 3555 if (r->action == PF_DROP) 3556 goto cleanup; 3557 3558 if (pf_tag_packet(m, tag, rtableid)) { 3559 REASON_SET(&reason, PFRES_MEMORY); 3560 goto cleanup; 3561 } 3562 3563 if (!state_icmp && (r->keep_state || nr != NULL || 3564 (pd->flags & 
PFDESC_TCP_NORM))) { 3565 int action; 3566 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m, 3567 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum, 3568 bip_sum, hdrlen); 3569 if (action != PF_PASS) 3570 return (action); 3571 } 3572 3573 /* copy back packet headers if we performed NAT operations */ 3574 if (rewrite) 3575 m_copyback(m, off, hdrlen, pd->hdr.any); 3576 3577 return (PF_PASS); 3578 3579 cleanup: 3580 if (sk != NULL) 3581 pool_put(&pf_state_key_pl, sk); 3582 if (nk != NULL) 3583 pool_put(&pf_state_key_pl, nk); 3584 return (PF_DROP); 3585 } 3586 3587 static __inline int 3588 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, 3589 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw, 3590 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk, 3591 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite, 3592 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum, 3593 u_int16_t bip_sum, int hdrlen) 3594 { 3595 struct pf_state *s = NULL; 3596 struct pf_src_node *sn = NULL; 3597 struct tcphdr *th = pd->hdr.tcp; 3598 u_int16_t mss = tcp_mssdflt; 3599 u_short reason; 3600 3601 /* check maximums */ 3602 if (r->max_states && (r->states_cur >= r->max_states)) { 3603 pf_status.lcounters[LCNT_STATES]++; 3604 REASON_SET(&reason, PFRES_MAXSTATES); 3605 return (PF_DROP); 3606 } 3607 /* src node for filter rule */ 3608 if ((r->rule_flag & PFRULE_SRCTRACK || 3609 r->rpool.opts & PF_POOL_STICKYADDR) && 3610 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) { 3611 REASON_SET(&reason, PFRES_SRCLIMIT); 3612 goto csfailed; 3613 } 3614 /* src node for translation rule */ 3615 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) && 3616 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) { 3617 REASON_SET(&reason, PFRES_SRCLIMIT); 3618 goto csfailed; 3619 } 3620 s = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO); 3621 if (s == NULL) { 3622 REASON_SET(&reason, PFRES_MEMORY); 3623 goto csfailed; 3624 } 3625 s->id = 0; /* XXX Do we really need that? 
not in OpenBSD */ 3626 s->creatorid = 0; 3627 s->rule.ptr = r; 3628 s->nat_rule.ptr = nr; 3629 s->anchor.ptr = a; 3630 STATE_INC_COUNTERS(s); 3631 if (r->allow_opts) 3632 s->state_flags |= PFSTATE_ALLOWOPTS; 3633 if (r->rule_flag & PFRULE_STATESLOPPY) 3634 s->state_flags |= PFSTATE_SLOPPY; 3635 s->log = r->log & PF_LOG_ALL; 3636 if (nr != NULL) 3637 s->log |= nr->log & PF_LOG_ALL; 3638 switch (pd->proto) { 3639 case IPPROTO_TCP: 3640 s->src.seqlo = ntohl(th->th_seq); 3641 s->src.seqhi = s->src.seqlo + pd->p_len + 1; 3642 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && 3643 r->keep_state == PF_STATE_MODULATE) { 3644 /* Generate sequence number modulator */ 3645 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 3646 0) 3647 s->src.seqdiff = 1; 3648 pf_change_a(&th->th_seq, &th->th_sum, 3649 htonl(s->src.seqlo + s->src.seqdiff), 0); 3650 *rewrite = 1; 3651 } else 3652 s->src.seqdiff = 0; 3653 if (th->th_flags & TH_SYN) { 3654 s->src.seqhi++; 3655 s->src.wscale = pf_get_wscale(m, off, 3656 th->th_off, pd->af); 3657 } 3658 s->src.max_win = MAX(ntohs(th->th_win), 1); 3659 if (s->src.wscale & PF_WSCALE_MASK) { 3660 /* Remove scale factor from initial window */ 3661 int win = s->src.max_win; 3662 win += 1 << (s->src.wscale & PF_WSCALE_MASK); 3663 s->src.max_win = (win - 1) >> 3664 (s->src.wscale & PF_WSCALE_MASK); 3665 } 3666 if (th->th_flags & TH_FIN) 3667 s->src.seqhi++; 3668 s->dst.seqhi = 1; 3669 s->dst.max_win = 1; 3670 s->src.state = TCPS_SYN_SENT; 3671 s->dst.state = TCPS_CLOSED; 3672 s->timeout = PFTM_TCP_FIRST_PACKET; 3673 break; 3674 case IPPROTO_UDP: 3675 s->src.state = PFUDPS_SINGLE; 3676 s->dst.state = PFUDPS_NO_TRAFFIC; 3677 s->timeout = PFTM_UDP_FIRST_PACKET; 3678 break; 3679 case IPPROTO_ICMP: 3680 #ifdef INET6 3681 case IPPROTO_ICMPV6: 3682 #endif 3683 s->timeout = PFTM_ICMP_FIRST_PACKET; 3684 break; 3685 default: 3686 s->src.state = PFOTHERS_SINGLE; 3687 s->dst.state = PFOTHERS_NO_TRAFFIC; 3688 s->timeout = PFTM_OTHER_FIRST_PACKET; 3689 } 3690 3691 s->creation = time_second; 3692 s->expire = time_second; 3693 3694 if (sn != NULL) { 3695 s->src_node = sn; 3696 s->src_node->states++; 3697 } 3698 if (nsn != NULL) { 3699 /* XXX We only modify one side for now. */ 3700 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af); 3701 s->nat_src_node = nsn; 3702 s->nat_src_node->states++; 3703 } 3704 if (pd->proto == IPPROTO_TCP) { 3705 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m, 3706 off, pd, th, &s->src, &s->dst)) { 3707 REASON_SET(&reason, PFRES_MEMORY); 3708 pf_src_tree_remove_state(s); 3709 STATE_DEC_COUNTERS(s); 3710 pool_put(&pf_state_pl, s); 3711 return (PF_DROP); 3712 } 3713 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && 3714 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s, 3715 &s->src, &s->dst, rewrite)) { 3716 /* This really shouldn't happen!!! 
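* pf_normalize_tcp_init() succeeded just above, so the stateful pass
* starts from fresh scrub state on this very first packet.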
*/ 3717 DPFPRINTF(PF_DEBUG_URGENT, 3718 ("pf_normalize_tcp_stateful failed on first pkt")); 3719 pf_normalize_tcp_cleanup(s); 3720 pf_src_tree_remove_state(s); 3721 STATE_DEC_COUNTERS(s); 3722 pool_put(&pf_state_pl, s); 3723 return (PF_DROP); 3724 } 3725 } 3726 s->direction = pd->dir; 3727 3728 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk, 3729 pd->src, pd->dst, sport, dport)) 3730 goto csfailed; 3731 3732 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) { 3733 if (pd->proto == IPPROTO_TCP) 3734 pf_normalize_tcp_cleanup(s); 3735 REASON_SET(&reason, PFRES_STATEINS); 3736 pf_src_tree_remove_state(s); 3737 STATE_DEC_COUNTERS(s); 3738 pool_put(&pf_state_pl, s); 3739 return (PF_DROP); 3740 } else 3741 *sm = s; 3742 3743 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */ 3744 if (tag > 0) { 3745 pf_tag_ref(tag); 3746 s->tag = tag; 3747 } 3748 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == 3749 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { 3750 s->src.state = PF_TCPS_PROXY_SRC; 3751 /* undo NAT changes, if they have taken place */ 3752 if (nr != NULL) { 3753 struct pf_state_key *skt = s->key[PF_SK_WIRE]; 3754 if (pd->dir == PF_OUT) 3755 skt = s->key[PF_SK_STACK]; 3756 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); 3757 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); 3758 if (pd->sport) 3759 *pd->sport = skt->port[pd->sidx]; 3760 if (pd->dport) 3761 *pd->dport = skt->port[pd->didx]; 3762 if (pd->proto_sum) 3763 *pd->proto_sum = bproto_sum; 3764 if (pd->ip_sum) 3765 *pd->ip_sum = bip_sum; 3766 m_copyback(m, off, hdrlen, pd->hdr.any); 3767 } 3768 s->src.seqhi = htonl(karc4random()); 3769 /* Find mss option */ 3770 mss = pf_get_mss(m, off, th->th_off, pd->af); 3771 mss = pf_calc_mss(pd->src, pd->af, mss); 3772 mss = pf_calc_mss(pd->dst, pd->af, mss); 3773 s->src.mss = mss; 3774 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, 3775 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, 3776 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL); 3777 REASON_SET(&reason, PFRES_SYNPROXY); 3778 return (PF_SYNPROXY_DROP); 3779 } 3780 3781 return (PF_PASS); 3782 3783 csfailed: 3784 if (sk != NULL) 3785 pool_put(&pf_state_key_pl, sk); 3786 if (nk != NULL) 3787 pool_put(&pf_state_key_pl, nk); 3788 3789 if (sn != NULL && sn->states == 0 && sn->expire == 0) { 3790 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn); 3791 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 3792 pf_status.src_nodes--; 3793 pool_put(&pf_src_tree_pl, sn); 3794 } 3795 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) { 3796 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn); 3797 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 3798 pf_status.src_nodes--; 3799 pool_put(&pf_src_tree_pl, nsn); 3800 } 3801 return (PF_DROP); 3802 } 3803 3804 int 3805 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, 3806 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am, 3807 struct pf_ruleset **rsm) 3808 { 3809 struct pf_rule *r, *a = NULL; 3810 struct pf_ruleset *ruleset = NULL; 3811 sa_family_t af = pd->af; 3812 u_short reason; 3813 int tag = -1; 3814 int asd = 0; 3815 int match = 0; 3816 3817 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3818 while (r != NULL) { 3819 r->evaluations++; 3820 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3821 r = r->skip[PF_SKIP_IFP].ptr; 3822 else if (r->direction && r->direction != direction) 3823 r = r->skip[PF_SKIP_DIR].ptr; 3824 else if (r->af && r->af != af) 3825 r = r->skip[PF_SKIP_AF].ptr; 
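/* The skip steps let the evaluator jump over consecutive rules that share the same non-matching parameter instead of testing each rule individually. */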
3826 else if (r->proto && r->proto != pd->proto) 3827 r = r->skip[PF_SKIP_PROTO].ptr; 3828 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 3829 r->src.neg, kif)) 3830 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3831 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 3832 r->dst.neg, NULL)) 3833 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3834 else if (r->tos && !(r->tos == pd->tos)) 3835 r = TAILQ_NEXT(r, entries); 3836 else if (r->os_fingerprint != PF_OSFP_ANY) 3837 r = TAILQ_NEXT(r, entries); 3838 else if (pd->proto == IPPROTO_UDP && 3839 (r->src.port_op || r->dst.port_op)) 3840 r = TAILQ_NEXT(r, entries); 3841 else if (pd->proto == IPPROTO_TCP && 3842 (r->src.port_op || r->dst.port_op || r->flagset)) 3843 r = TAILQ_NEXT(r, entries); 3844 else if ((pd->proto == IPPROTO_ICMP || 3845 pd->proto == IPPROTO_ICMPV6) && 3846 (r->type || r->code)) 3847 r = TAILQ_NEXT(r, entries); 3848 else if (r->prob && r->prob <= karc4random()) 3849 r = TAILQ_NEXT(r, entries); 3850 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3851 r = TAILQ_NEXT(r, entries); 3852 else { 3853 if (r->anchor == NULL) { 3854 match = 1; 3855 *rm = r; 3856 *am = a; 3857 *rsm = ruleset; 3858 if ((*rm)->quick) 3859 break; 3860 r = TAILQ_NEXT(r, entries); 3861 } else 3862 pf_step_into_anchor(&asd, &ruleset, 3863 PF_RULESET_FILTER, &r, &a, &match); 3864 } 3865 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3866 PF_RULESET_FILTER, &r, &a, &match)) 3867 break; 3868 } 3869 r = *rm; 3870 a = *am; 3871 ruleset = *rsm; 3872 3873 REASON_SET(&reason, PFRES_MATCH); 3874 3875 if (r->log) 3876 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset, 3877 pd); 3878 3879 if (r->action != PF_PASS) 3880 return (PF_DROP); 3881 3882 if (pf_tag_packet(m, tag, -1)) { 3883 REASON_SET(&reason, PFRES_MEMORY); 3884 return (PF_DROP); 3885 } 3886 3887 return (PF_PASS); 3888 } 3889 3890 int 3891 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst, 3892 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off, 3893 struct pf_pdesc *pd, u_short *reason, int *copyback) 3894 { 3895 struct tcphdr *th = pd->hdr.tcp; 3896 u_int16_t win = ntohs(th->th_win); 3897 u_int32_t ack, end, seq, orig_seq; 3898 u_int8_t sws, dws; 3899 int ackskew; 3900 3901 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { 3902 sws = src->wscale & PF_WSCALE_MASK; 3903 dws = dst->wscale & PF_WSCALE_MASK; 3904 } else 3905 sws = dws = 0; 3906 3907 /* 3908 * Sequence tracking algorithm from Guido van Rooij's paper: 3909 * http://www.madison-gurkha.com/publications/tcp_filtering/ 3910 * tcp_filtering.ps 3911 */ 3912 3913 orig_seq = seq = ntohl(th->th_seq); 3914 if (src->seqlo == 0) { 3915 /* First packet from this end. 
Set its state */ 3916 3917 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) && 3918 src->scrub == NULL) { 3919 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) { 3920 REASON_SET(reason, PFRES_MEMORY); 3921 return (PF_DROP); 3922 } 3923 } 3924 3925 /* Deferred generation of sequence number modulator */ 3926 if (dst->seqdiff && !src->seqdiff) { 3927 /* use random iss for the TCP server */ 3928 while ((src->seqdiff = karc4random() - seq) == 0) 3929 ; 3930 ack = ntohl(th->th_ack) - dst->seqdiff; 3931 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 3932 src->seqdiff), 0); 3933 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 3934 *copyback = 1; 3935 } else { 3936 ack = ntohl(th->th_ack); 3937 } 3938 3939 end = seq + pd->p_len; 3940 if (th->th_flags & TH_SYN) { 3941 end++; 3942 (*state)->sync_flags |= PFSTATE_GOT_SYN2; 3943 if (dst->wscale & PF_WSCALE_FLAG) { 3944 src->wscale = pf_get_wscale(m, off, th->th_off, 3945 pd->af); 3946 if (src->wscale & PF_WSCALE_FLAG) { 3947 /* Remove scale factor from initial 3948 * window */ 3949 sws = src->wscale & PF_WSCALE_MASK; 3950 win = ((u_int32_t)win + (1 << sws) - 1) 3951 >> sws; 3952 dws = dst->wscale & PF_WSCALE_MASK; 3953 } else { 3954 /* fixup other window */ 3955 dst->max_win <<= dst->wscale & 3956 PF_WSCALE_MASK; 3957 /* in case of a retrans SYN|ACK */ 3958 dst->wscale = 0; 3959 } 3960 } 3961 } 3962 if (th->th_flags & TH_FIN) 3963 end++; 3964 3965 src->seqlo = seq; 3966 if (src->state < TCPS_SYN_SENT) 3967 src->state = TCPS_SYN_SENT; 3968 3969 /* 3970 * May need to slide the window (seqhi may have been set by 3971 * the crappy stack check or if we picked up the connection 3972 * after establishment) 3973 */ 3974 if (src->seqhi == 1 || 3975 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)) 3976 src->seqhi = end + MAX(1, dst->max_win << dws); 3977 if (win > src->max_win) 3978 src->max_win = win; 3979 3980 } else { 3981 ack = ntohl(th->th_ack) - dst->seqdiff; 3982 if (src->seqdiff) { 3983 /* Modulate sequence numbers */ 3984 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 3985 src->seqdiff), 0); 3986 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 3987 *copyback = 1; 3988 } 3989 end = seq + pd->p_len; 3990 if (th->th_flags & TH_SYN) 3991 end++; 3992 if (th->th_flags & TH_FIN) 3993 end++; 3994 } 3995 3996 if ((th->th_flags & TH_ACK) == 0) { 3997 /* Let it pass through the ack skew check */ 3998 ack = dst->seqlo; 3999 } else if ((ack == 0 && 4000 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || 4001 /* broken tcp stacks do not set ack */ 4002 (dst->state < TCPS_SYN_SENT)) { 4003 /* 4004 * Many stacks (ours included) will set the ACK number in a 4005 * FIN|ACK if the SYN times out -- no sequence to ACK. 4006 */ 4007 ack = dst->seqlo; 4008 } 4009 4010 if (seq == end) { 4011 /* Ease sequencing restrictions on no-data packets */ 4012 seq = src->seqlo; 4013 end = seq; 4014 } 4015 4016 ackskew = dst->seqlo - ack; 4017 4018 4019 /* 4020 * Need to demodulate the sequence numbers in any TCP SACK options 4021 * (Selective ACK). We could optionally validate the SACK values 4022 * against the current ACK window, either forwards or backwards, but 4023 * I'm not confident that SACK has been implemented properly 4024 * everywhere. It wouldn't surprise me if several stacks accidentally 4025 * SACK too far backwards of previously ACKed data. There really aren't 4026 * any security implications of bad SACKing unless the target stack 4027 * doesn't validate the option length correctly.
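* pf_modulate_sack() below applies dst->seqdiff to each SACK block
* edge so the blocks stay consistent with the already-modulated
* sequence numbers.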
Someone trying to 4028 * spoof into a TCP connection won't bother blindly sending SACK 4029 * options anyway. 4030 */ 4031 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { 4032 if (pf_modulate_sack(m, off, pd, th, dst)) 4033 *copyback = 1; 4034 } 4035 4036 4037 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 4038 if (SEQ_GEQ(src->seqhi, end) && 4039 /* Last octet inside other's window space */ 4040 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && 4041 /* Retrans: not more than one window back */ 4042 (ackskew >= -MAXACKWINDOW) && 4043 /* Acking not more than one reassembled fragment backwards */ 4044 (ackskew <= (MAXACKWINDOW << sws)) && 4045 /* Acking not more than one window forward */ 4046 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || 4047 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || 4048 (pd->flags & PFDESC_IP_REAS) == 0)) { 4049 /* Require an exact/+1 sequence match on resets when possible */ 4050 4051 if (dst->scrub || src->scrub) { 4052 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4053 *state, src, dst, copyback)) 4054 return (PF_DROP); 4055 } 4056 4057 /* update max window */ 4058 if (src->max_win < win) 4059 src->max_win = win; 4060 /* synchronize sequencing */ 4061 if (SEQ_GT(end, src->seqlo)) 4062 src->seqlo = end; 4063 /* slide the window of what the other end can send */ 4064 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4065 dst->seqhi = ack + MAX((win << sws), 1); 4066 4067 4068 /* update states */ 4069 if (th->th_flags & TH_SYN) 4070 if (src->state < TCPS_SYN_SENT) 4071 src->state = TCPS_SYN_SENT; 4072 if (th->th_flags & TH_FIN) 4073 if (src->state < TCPS_CLOSING) 4074 src->state = TCPS_CLOSING; 4075 if (th->th_flags & TH_ACK) { 4076 if (dst->state == TCPS_SYN_SENT) { 4077 dst->state = TCPS_ESTABLISHED; 4078 if (src->state == TCPS_ESTABLISHED && 4079 (*state)->src_node != NULL && 4080 pf_src_connlimit(state)) { 4081 REASON_SET(reason, PFRES_SRCLIMIT); 4082 return (PF_DROP); 4083 } 4084 } else if (dst->state == TCPS_CLOSING) 4085 dst->state = TCPS_FIN_WAIT_2; 4086 } 4087 if (th->th_flags & TH_RST) 4088 src->state = dst->state = TCPS_TIME_WAIT; 4089 4090 /* update expire time */ 4091 (*state)->expire = time_second; 4092 if (src->state >= TCPS_FIN_WAIT_2 && 4093 dst->state >= TCPS_FIN_WAIT_2) 4094 (*state)->timeout = PFTM_TCP_CLOSED; 4095 else if (src->state >= TCPS_CLOSING && 4096 dst->state >= TCPS_CLOSING) 4097 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4098 else if (src->state < TCPS_ESTABLISHED || 4099 dst->state < TCPS_ESTABLISHED) 4100 (*state)->timeout = PFTM_TCP_OPENING; 4101 else if (src->state >= TCPS_CLOSING || 4102 dst->state >= TCPS_CLOSING) 4103 (*state)->timeout = PFTM_TCP_CLOSING; 4104 else 4105 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4106 4107 /* Fall through to PASS packet */ 4108 4109 } else if ((dst->state < TCPS_SYN_SENT || 4110 dst->state >= TCPS_FIN_WAIT_2 || 4111 src->state >= TCPS_FIN_WAIT_2) && 4112 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 4113 /* Within a window forward of the originating packet */ 4114 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 4115 /* Within a window backward of the originating packet */ 4116 4117 /* 4118 * This currently handles three situations: 4119 * 1) Stupid stacks will shotgun SYNs before their peer 4120 * replies. 4121 * 2) When PF catches an already established stream (the 4122 * firewall rebooted, the state table was flushed, routes 4123 * changed...) 
4124 * 3) Packets get funky immediately after the connection 4125 * closes (this should catch Solaris spurious ACK|FINs 4126 * that web servers like to spew after a close) 4127 * 4128 * This must be a little more careful than the above code 4129 * since packet floods will also be caught here. We don't 4130 * update the TTL here to mitigate the damage of a packet 4131 * flood and so the same code can handle awkward establishment 4132 * and a loosened connection close. 4133 * In the establishment case, a correct peer response will 4134 * validate the connection, go through the normal state code 4135 * and keep updating the state TTL. 4136 */ 4137 4138 if (pf_status.debug >= PF_DEBUG_MISC) { 4139 kprintf("pf: loose state match: "); 4140 pf_print_state(*state); 4141 pf_print_flags(th->th_flags); 4142 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 4143 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len, 4144 ackskew, (unsigned long long)(*state)->packets[0], 4145 (unsigned long long)(*state)->packets[1], 4146 pd->dir == PF_IN ? "in" : "out", 4147 pd->dir == (*state)->direction ? "fwd" : "rev"); 4148 } 4149 4150 if (dst->scrub || src->scrub) { 4151 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4152 *state, src, dst, copyback)) 4153 return (PF_DROP); 4154 } 4155 4156 /* update max window */ 4157 if (src->max_win < win) 4158 src->max_win = win; 4159 /* synchronize sequencing */ 4160 if (SEQ_GT(end, src->seqlo)) 4161 src->seqlo = end; 4162 /* slide the window of what the other end can send */ 4163 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4164 dst->seqhi = ack + MAX((win << sws), 1); 4165 4166 /* 4167 * Cannot set dst->seqhi here since this could be a shotgunned 4168 * SYN and not an already established connection. 4169 */ 4170 4171 if (th->th_flags & TH_FIN) 4172 if (src->state < TCPS_CLOSING) 4173 src->state = TCPS_CLOSING; 4174 if (th->th_flags & TH_RST) 4175 src->state = dst->state = TCPS_TIME_WAIT; 4176 4177 /* Fall through to PASS packet */ 4178 4179 } else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY || 4180 ((*state)->pickup_mode == PF_PICKUPS_ENABLED && 4181 ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) != 4182 PFSTATE_GOT_SYN_MASK)) { 4183 /* 4184 * If pickup mode is hash only, do not fail on sequence checks. 4185 * 4186 * If pickup mode is enabled and we did not see the SYN in 4187 * both direction, do not fail on sequence checks because 4188 * we do not have complete information on window scale. 4189 * 4190 * Adjust expiration and fall through to PASS packet. 4191 * XXX Add a FIN check to reduce timeout? 4192 */ 4193 (*state)->expire = time_second; 4194 } else { 4195 /* 4196 * Failure processing 4197 */ 4198 if ((*state)->dst.state == TCPS_SYN_SENT && 4199 (*state)->src.state == TCPS_SYN_SENT) { 4200 /* Send RST for state mismatches during handshake */ 4201 if (!(th->th_flags & TH_RST)) 4202 pf_send_tcp((*state)->rule.ptr, pd->af, 4203 pd->dst, pd->src, th->th_dport, 4204 th->th_sport, ntohl(th->th_ack), 0, 4205 TH_RST, 0, 0, 4206 (*state)->rule.ptr->return_ttl, 1, 0, 4207 pd->eh, kif->pfik_ifp); 4208 src->seqlo = 0; 4209 src->seqhi = 1; 4210 src->max_win = 1; 4211 } else if (pf_status.debug >= PF_DEBUG_MISC) { 4212 kprintf("pf: BAD state: "); 4213 pf_print_state(*state); 4214 pf_print_flags(th->th_flags); 4215 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 4216 "pkts=%llu:%llu dir=%s,%s\n", 4217 seq, orig_seq, ack, pd->p_len, ackskew, 4218 (unsigned long long)(*state)->packets[0], 4219 (unsigned long long)(*state)->packets[1], 4220 pd->dir == PF_IN ? 
"in" : "out", 4221 pd->dir == (*state)->direction ? "fwd" : "rev"); 4222 kprintf("pf: State failure on: %c %c %c %c | %c %c\n", 4223 SEQ_GEQ(src->seqhi, end) ? ' ' : '1', 4224 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? 4225 ' ': '2', 4226 (ackskew >= -MAXACKWINDOW) ? ' ' : '3', 4227 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', 4228 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', 4229 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6'); 4230 } 4231 REASON_SET(reason, PFRES_BADSTATE); 4232 return (PF_DROP); 4233 } 4234 4235 return (PF_PASS); 4236 } 4237 4238 int 4239 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst, 4240 struct pf_state **state, struct pf_pdesc *pd, u_short *reason) 4241 { 4242 struct tcphdr *th = pd->hdr.tcp; 4243 4244 if (th->th_flags & TH_SYN) 4245 if (src->state < TCPS_SYN_SENT) 4246 src->state = TCPS_SYN_SENT; 4247 if (th->th_flags & TH_FIN) 4248 if (src->state < TCPS_CLOSING) 4249 src->state = TCPS_CLOSING; 4250 if (th->th_flags & TH_ACK) { 4251 if (dst->state == TCPS_SYN_SENT) { 4252 dst->state = TCPS_ESTABLISHED; 4253 if (src->state == TCPS_ESTABLISHED && 4254 (*state)->src_node != NULL && 4255 pf_src_connlimit(state)) { 4256 REASON_SET(reason, PFRES_SRCLIMIT); 4257 return (PF_DROP); 4258 } 4259 } else if (dst->state == TCPS_CLOSING) { 4260 dst->state = TCPS_FIN_WAIT_2; 4261 } else if (src->state == TCPS_SYN_SENT && 4262 dst->state < TCPS_SYN_SENT) { 4263 /* 4264 * Handle a special sloppy case where we only see one 4265 * half of the connection. If there is a ACK after 4266 * the initial SYN without ever seeing a packet from 4267 * the destination, set the connection to established. 4268 */ 4269 dst->state = src->state = TCPS_ESTABLISHED; 4270 if ((*state)->src_node != NULL && 4271 pf_src_connlimit(state)) { 4272 REASON_SET(reason, PFRES_SRCLIMIT); 4273 return (PF_DROP); 4274 } 4275 } else if (src->state == TCPS_CLOSING && 4276 dst->state == TCPS_ESTABLISHED && 4277 dst->seqlo == 0) { 4278 /* 4279 * Handle the closing of half connections where we 4280 * don't see the full bidirectional FIN/ACK+ACK 4281 * handshake. 
4282 */ 4283 dst->state = TCPS_CLOSING; 4284 } 4285 } 4286 if (th->th_flags & TH_RST) 4287 src->state = dst->state = TCPS_TIME_WAIT; 4288 4289 /* update expire time */ 4290 (*state)->expire = time_second; 4291 if (src->state >= TCPS_FIN_WAIT_2 && 4292 dst->state >= TCPS_FIN_WAIT_2) 4293 (*state)->timeout = PFTM_TCP_CLOSED; 4294 else if (src->state >= TCPS_CLOSING && 4295 dst->state >= TCPS_CLOSING) 4296 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4297 else if (src->state < TCPS_ESTABLISHED || 4298 dst->state < TCPS_ESTABLISHED) 4299 (*state)->timeout = PFTM_TCP_OPENING; 4300 else if (src->state >= TCPS_CLOSING || 4301 dst->state >= TCPS_CLOSING) 4302 (*state)->timeout = PFTM_TCP_CLOSING; 4303 else 4304 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4305 4306 return (PF_PASS); 4307 } 4308 4309 int 4310 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, 4311 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4312 u_short *reason) 4313 { 4314 struct pf_state_key_cmp key; 4315 struct tcphdr *th = pd->hdr.tcp; 4316 int copyback = 0; 4317 struct pf_state_peer *src, *dst; 4318 struct pf_state_key *sk; 4319 4320 key.af = pd->af; 4321 key.proto = IPPROTO_TCP; 4322 if (direction == PF_IN) { /* wire side, straight */ 4323 PF_ACPY(&key.addr[0], pd->src, key.af); 4324 PF_ACPY(&key.addr[1], pd->dst, key.af); 4325 key.port[0] = th->th_sport; 4326 key.port[1] = th->th_dport; 4327 } else { /* stack side, reverse */ 4328 PF_ACPY(&key.addr[1], pd->src, key.af); 4329 PF_ACPY(&key.addr[0], pd->dst, key.af); 4330 key.port[1] = th->th_sport; 4331 key.port[0] = th->th_dport; 4332 } 4333 4334 STATE_LOOKUP(kif, &key, direction, *state, m); 4335 4336 if (direction == (*state)->direction) { 4337 src = &(*state)->src; 4338 dst = &(*state)->dst; 4339 } else { 4340 src = &(*state)->dst; 4341 dst = &(*state)->src; 4342 } 4343 4344 sk = (*state)->key[pd->didx]; 4345 4346 if ((*state)->src.state == PF_TCPS_PROXY_SRC) { 4347 if (direction != (*state)->direction) { 4348 REASON_SET(reason, PFRES_SYNPROXY); 4349 return (PF_SYNPROXY_DROP); 4350 } 4351 if (th->th_flags & TH_SYN) { 4352 if (ntohl(th->th_seq) != (*state)->src.seqlo) { 4353 REASON_SET(reason, PFRES_SYNPROXY); 4354 return (PF_DROP); 4355 } 4356 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4357 pd->src, th->th_dport, th->th_sport, 4358 (*state)->src.seqhi, ntohl(th->th_seq) + 1, 4359 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 4360 0, NULL, NULL); 4361 REASON_SET(reason, PFRES_SYNPROXY); 4362 return (PF_SYNPROXY_DROP); 4363 } else if (!(th->th_flags & TH_ACK) || 4364 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4365 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4366 REASON_SET(reason, PFRES_SYNPROXY); 4367 return (PF_DROP); 4368 } else if ((*state)->src_node != NULL && 4369 pf_src_connlimit(state)) { 4370 REASON_SET(reason, PFRES_SRCLIMIT); 4371 return (PF_DROP); 4372 } else 4373 (*state)->src.state = PF_TCPS_PROXY_DST; 4374 } 4375 if ((*state)->src.state == PF_TCPS_PROXY_DST) { 4376 if (direction == (*state)->direction) { 4377 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || 4378 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4379 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4380 REASON_SET(reason, PFRES_SYNPROXY); 4381 return (PF_DROP); 4382 } 4383 (*state)->src.max_win = MAX(ntohs(th->th_win), 1); 4384 if ((*state)->dst.seqhi == 1) 4385 (*state)->dst.seqhi = htonl(karc4random()); 4386 pf_send_tcp((*state)->rule.ptr, pd->af, 4387 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4388 sk->port[pd->sidx], sk->port[pd->didx], 4389 
(*state)->dst.seqhi, 0, TH_SYN, 0, 4390 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL); 4391 REASON_SET(reason, PFRES_SYNPROXY); 4392 return (PF_SYNPROXY_DROP); 4393 } else if (((th->th_flags & (TH_SYN|TH_ACK)) != 4394 (TH_SYN|TH_ACK)) || 4395 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { 4396 REASON_SET(reason, PFRES_SYNPROXY); 4397 return (PF_DROP); 4398 } else { 4399 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); 4400 (*state)->dst.seqlo = ntohl(th->th_seq); 4401 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4402 pd->src, th->th_dport, th->th_sport, 4403 ntohl(th->th_ack), ntohl(th->th_seq) + 1, 4404 TH_ACK, (*state)->src.max_win, 0, 0, 0, 4405 (*state)->tag, NULL, NULL); 4406 pf_send_tcp((*state)->rule.ptr, pd->af, 4407 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4408 sk->port[pd->sidx], sk->port[pd->didx], 4409 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, 4410 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 4411 0, NULL, NULL); 4412 (*state)->src.seqdiff = (*state)->dst.seqhi - 4413 (*state)->src.seqlo; 4414 (*state)->dst.seqdiff = (*state)->src.seqhi - 4415 (*state)->dst.seqlo; 4416 (*state)->src.seqhi = (*state)->src.seqlo + 4417 (*state)->dst.max_win; 4418 (*state)->dst.seqhi = (*state)->dst.seqlo + 4419 (*state)->src.max_win; 4420 (*state)->src.wscale = (*state)->dst.wscale = 0; 4421 (*state)->src.state = (*state)->dst.state = 4422 TCPS_ESTABLISHED; 4423 REASON_SET(reason, PFRES_SYNPROXY); 4424 return (PF_SYNPROXY_DROP); 4425 } 4426 } 4427 4428 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) && 4429 dst->state >= TCPS_FIN_WAIT_2 && 4430 src->state >= TCPS_FIN_WAIT_2) { 4431 if (pf_status.debug >= PF_DEBUG_MISC) { 4432 kprintf("pf: state reuse "); 4433 pf_print_state(*state); 4434 pf_print_flags(th->th_flags); 4435 kprintf("\n"); 4436 } 4437 /* XXX make sure it's the same direction ?? */ 4438 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 4439 pf_unlink_state(*state); 4440 *state = NULL; 4441 return (PF_DROP); 4442 } 4443 4444 if ((*state)->state_flags & PFSTATE_SLOPPY) { 4445 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP) 4446 return (PF_DROP); 4447 } else { 4448 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason, 4449 &copyback) == PF_DROP) 4450 return (PF_DROP); 4451 } 4452 4453 /* translate source/destination address, if necessary */ 4454 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4455 struct pf_state_key *nk = (*state)->key[pd->didx]; 4456 4457 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4458 nk->port[pd->sidx] != th->th_sport) { 4459 /* 4460 * The translated source address may be completely 4461 * unrelated to the saved link header, make sure 4462 * a bridge doesn't try to use it. 4463 */ 4464 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4465 m->m_flags &= ~M_HASH; 4466 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 4467 &th->th_sum, &nk->addr[pd->sidx], 4468 nk->port[pd->sidx], 0, pd->af); 4469 } 4470 4471 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4472 nk->port[pd->didx] != th->th_dport) { 4473 /* 4474 * If we don't redispatch the packet will go into 4475 * the protocol stack on the wrong cpu for the 4476 * post-translated address.
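 *
 * (DragonFly-specific detail, roughly: M_HASH marks m_pkthdr.hash as
 * valid, and the netisr layer dispatches packets to a protocol thread
 * based on that hash. After the destination is rewritten the stale
 * hash would steer the packet to the cpu owning the pre-NAT flow, so
 * it is cleared here and recomputed on redispatch from the translated
 * addresses.)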
4477 */ 4478 m->m_flags &= ~M_HASH; 4479 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 4480 &th->th_sum, &nk->addr[pd->didx], 4481 nk->port[pd->didx], 0, pd->af); 4482 } 4483 copyback = 1; 4484 } 4485 4486 /* Copyback sequence modulation or stateful scrub changes if needed */ 4487 if (copyback) 4488 m_copyback(m, off, sizeof(*th), (caddr_t)th); 4489 4490 return (PF_PASS); 4491 } 4492 4493 int 4494 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, 4495 struct mbuf *m, int off, void *h, struct pf_pdesc *pd) 4496 { 4497 struct pf_state_peer *src, *dst; 4498 struct pf_state_key_cmp key; 4499 struct udphdr *uh = pd->hdr.udp; 4500 4501 key.af = pd->af; 4502 key.proto = IPPROTO_UDP; 4503 if (direction == PF_IN) { /* wire side, straight */ 4504 PF_ACPY(&key.addr[0], pd->src, key.af); 4505 PF_ACPY(&key.addr[1], pd->dst, key.af); 4506 key.port[0] = uh->uh_sport; 4507 key.port[1] = uh->uh_dport; 4508 } else { /* stack side, reverse */ 4509 PF_ACPY(&key.addr[1], pd->src, key.af); 4510 PF_ACPY(&key.addr[0], pd->dst, key.af); 4511 key.port[1] = uh->uh_sport; 4512 key.port[0] = uh->uh_dport; 4513 } 4514 4515 STATE_LOOKUP(kif, &key, direction, *state, m); 4516 4517 if (direction == (*state)->direction) { 4518 src = &(*state)->src; 4519 dst = &(*state)->dst; 4520 } else { 4521 src = &(*state)->dst; 4522 dst = &(*state)->src; 4523 } 4524 4525 /* update states */ 4526 if (src->state < PFUDPS_SINGLE) 4527 src->state = PFUDPS_SINGLE; 4528 if (dst->state == PFUDPS_SINGLE) 4529 dst->state = PFUDPS_MULTIPLE; 4530 4531 /* update expire time */ 4532 (*state)->expire = time_second; 4533 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) 4534 (*state)->timeout = PFTM_UDP_MULTIPLE; 4535 else 4536 (*state)->timeout = PFTM_UDP_SINGLE; 4537 4538 /* translate source/destination address, if necessary */ 4539 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4540 struct pf_state_key *nk = (*state)->key[pd->didx]; 4541 4542 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4543 nk->port[pd->sidx] != uh->uh_sport) { 4544 /* 4545 * The translated source address may be completely 4546 * unrelated to the saved link header, make sure 4547 * a bridge doesn't try to use it. 4548 */ 4549 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4550 m->m_flags &= ~M_HASH; 4551 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 4552 &uh->uh_sum, &nk->addr[pd->sidx], 4553 nk->port[pd->sidx], 1, pd->af); 4554 } 4555 4556 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4557 nk->port[pd->didx] != uh->uh_dport) { 4558 /* 4559 * If we don't redispatch the packet will go into 4560 * the protocol stack on the wrong cpu for the 4561 * post-translated address. 
4562 */ 4563 m->m_flags &= ~M_HASH; 4564 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 4565 &uh->uh_sum, &nk->addr[pd->didx], 4566 nk->port[pd->didx], 1, pd->af); 4567 } 4568 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 4569 } 4570 4571 return (PF_PASS); 4572 } 4573 4574 int 4575 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, 4576 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason) 4577 { 4578 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 4579 u_int16_t icmpid = 0, *icmpsum; 4580 u_int8_t icmptype; 4581 int state_icmp = 0; 4582 struct pf_state_key_cmp key; 4583 4584 switch (pd->proto) { 4585 #ifdef INET 4586 case IPPROTO_ICMP: 4587 icmptype = pd->hdr.icmp->icmp_type; 4588 icmpid = pd->hdr.icmp->icmp_id; 4589 icmpsum = &pd->hdr.icmp->icmp_cksum; 4590 4591 if (icmptype == ICMP_UNREACH || 4592 icmptype == ICMP_SOURCEQUENCH || 4593 icmptype == ICMP_REDIRECT || 4594 icmptype == ICMP_TIMXCEED || 4595 icmptype == ICMP_PARAMPROB) 4596 state_icmp++; 4597 break; 4598 #endif /* INET */ 4599 #ifdef INET6 4600 case IPPROTO_ICMPV6: 4601 icmptype = pd->hdr.icmp6->icmp6_type; 4602 icmpid = pd->hdr.icmp6->icmp6_id; 4603 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 4604 4605 if (icmptype == ICMP6_DST_UNREACH || 4606 icmptype == ICMP6_PACKET_TOO_BIG || 4607 icmptype == ICMP6_TIME_EXCEEDED || 4608 icmptype == ICMP6_PARAM_PROB) 4609 state_icmp++; 4610 break; 4611 #endif /* INET6 */ 4612 } 4613 4614 if (!state_icmp) { 4615 4616 /* 4617 * ICMP query/reply message not related to a TCP/UDP packet. 4618 * Search for an ICMP state. 4619 */ 4620 key.af = pd->af; 4621 key.proto = pd->proto; 4622 key.port[0] = key.port[1] = icmpid; 4623 if (direction == PF_IN) { /* wire side, straight */ 4624 PF_ACPY(&key.addr[0], pd->src, key.af); 4625 PF_ACPY(&key.addr[1], pd->dst, key.af); 4626 } else { /* stack side, reverse */ 4627 PF_ACPY(&key.addr[1], pd->src, key.af); 4628 PF_ACPY(&key.addr[0], pd->dst, key.af); 4629 } 4630 4631 STATE_LOOKUP(kif, &key, direction, *state, m); 4632 4633 (*state)->expire = time_second; 4634 (*state)->timeout = PFTM_ICMP_ERROR_REPLY; 4635 4636 /* translate source/destination address, if necessary */ 4637 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4638 struct pf_state_key *nk = (*state)->key[pd->didx]; 4639 4640 switch (pd->af) { 4641 #ifdef INET 4642 case AF_INET: 4643 if (PF_ANEQ(pd->src, 4644 &nk->addr[pd->sidx], AF_INET)) 4645 pf_change_a(&saddr->v4.s_addr, 4646 pd->ip_sum, 4647 nk->addr[pd->sidx].v4.s_addr, 0); 4648 4649 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], 4650 AF_INET)) 4651 pf_change_a(&daddr->v4.s_addr, 4652 pd->ip_sum, 4653 nk->addr[pd->didx].v4.s_addr, 0); 4654 4655 if (nk->port[0] != 4656 pd->hdr.icmp->icmp_id) { 4657 pd->hdr.icmp->icmp_cksum = 4658 pf_cksum_fixup( 4659 pd->hdr.icmp->icmp_cksum, icmpid, 4660 nk->port[pd->sidx], 0); 4661 pd->hdr.icmp->icmp_id = 4662 nk->port[pd->sidx]; 4663 } 4664 4665 m_copyback(m, off, ICMP_MINLEN, 4666 (caddr_t)pd->hdr.icmp); 4667 break; 4668 #endif /* INET */ 4669 #ifdef INET6 4670 case AF_INET6: 4671 if (PF_ANEQ(pd->src, 4672 &nk->addr[pd->sidx], AF_INET6)) 4673 pf_change_a6(saddr, 4674 &pd->hdr.icmp6->icmp6_cksum, 4675 &nk->addr[pd->sidx], 0); 4676 4677 if (PF_ANEQ(pd->dst, 4678 &nk->addr[pd->didx], AF_INET6)) 4679 pf_change_a6(daddr, 4680 &pd->hdr.icmp6->icmp6_cksum, 4681 &nk->addr[pd->didx], 0); 4682 4683 m_copyback(m, off, 4684 sizeof(struct icmp6_hdr), 4685 (caddr_t)pd->hdr.icmp6); 4686 break; 4687 #endif /* INET6 */ 4688 } 4689 } 4690 return (PF_PASS); 4691 4692 } 
else { 4693 /* 4694 * ICMP error message in response to a TCP/UDP packet. 4695 * Extract the inner TCP/UDP header and search for that state. 4696 */ 4697 4698 struct pf_pdesc pd2; 4699 #ifdef INET 4700 struct ip h2; 4701 #endif /* INET */ 4702 #ifdef INET6 4703 struct ip6_hdr h2_6; 4704 int terminal = 0; 4705 #endif /* INET6 */ 4706 int ipoff2; 4707 int off2; 4708 4709 pd2.af = pd->af; 4710 /* Payload packet is from the opposite direction. */ 4711 pd2.sidx = (direction == PF_IN) ? 1 : 0; 4712 pd2.didx = (direction == PF_IN) ? 0 : 1; 4713 switch (pd->af) { 4714 #ifdef INET 4715 case AF_INET: 4716 /* offset of h2 in mbuf chain */ 4717 ipoff2 = off + ICMP_MINLEN; 4718 4719 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 4720 NULL, reason, pd2.af)) { 4721 DPFPRINTF(PF_DEBUG_MISC, 4722 ("pf: ICMP error message too short " 4723 "(ip)\n")); 4724 return (PF_DROP); 4725 } 4726 /* 4727 * ICMP error messages don't refer to non-first 4728 * fragments 4729 */ 4730 if (h2.ip_off & htons(IP_OFFMASK)) { 4731 REASON_SET(reason, PFRES_FRAG); 4732 return (PF_DROP); 4733 } 4734 4735 /* offset of protocol header that follows h2 */ 4736 off2 = ipoff2 + (h2.ip_hl << 2); 4737 4738 pd2.proto = h2.ip_p; 4739 pd2.src = (struct pf_addr *)&h2.ip_src; 4740 pd2.dst = (struct pf_addr *)&h2.ip_dst; 4741 pd2.ip_sum = &h2.ip_sum; 4742 break; 4743 #endif /* INET */ 4744 #ifdef INET6 4745 case AF_INET6: 4746 ipoff2 = off + sizeof(struct icmp6_hdr); 4747 4748 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 4749 NULL, reason, pd2.af)) { 4750 DPFPRINTF(PF_DEBUG_MISC, 4751 ("pf: ICMP error message too short " 4752 "(ip6)\n")); 4753 return (PF_DROP); 4754 } 4755 pd2.proto = h2_6.ip6_nxt; 4756 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 4757 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 4758 pd2.ip_sum = NULL; 4759 off2 = ipoff2 + sizeof(h2_6); 4760 do { 4761 switch (pd2.proto) { 4762 case IPPROTO_FRAGMENT: 4763 /* 4764 * ICMPv6 error messages for 4765 * non-first fragments 4766 */ 4767 REASON_SET(reason, PFRES_FRAG); 4768 return (PF_DROP); 4769 case IPPROTO_AH: 4770 case IPPROTO_HOPOPTS: 4771 case IPPROTO_ROUTING: 4772 case IPPROTO_DSTOPTS: { 4773 /* get next header and header length */ 4774 struct ip6_ext opt6; 4775 4776 if (!pf_pull_hdr(m, off2, &opt6, 4777 sizeof(opt6), NULL, reason, 4778 pd2.af)) { 4779 DPFPRINTF(PF_DEBUG_MISC, 4780 ("pf: ICMPv6 short opt\n")); 4781 return (PF_DROP); 4782 } 4783 if (pd2.proto == IPPROTO_AH) 4784 off2 += (opt6.ip6e_len + 2) * 4; 4785 else 4786 off2 += (opt6.ip6e_len + 1) * 8; 4787 pd2.proto = opt6.ip6e_nxt; 4788 /* goto the next header */ 4789 break; 4790 } 4791 default: 4792 terminal++; 4793 break; 4794 } 4795 } while (!terminal); 4796 break; 4797 #endif /* INET6 */ 4798 default: 4799 DPFPRINTF(PF_DEBUG_MISC, 4800 ("pf: ICMP AF %d unknown (ip6)\n", pd->af)); 4801 return (PF_DROP); 4802 break; 4803 } 4804 4805 switch (pd2.proto) { 4806 case IPPROTO_TCP: { 4807 struct tcphdr th; 4808 u_int32_t seq; 4809 struct pf_state_peer *src, *dst; 4810 u_int8_t dws; 4811 int copyback = 0; 4812 4813 /* 4814 * Only the first 8 bytes of the TCP header can be 4815 * expected. Don't access any TCP header fields after 4816 * th_seq, an ackskew test is not possible. 
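 *
 * (Why 8: RFC 792 only guarantees that an ICMP error quotes the IP
 * header plus the first 8 octets of the offending datagram. Those 8
 * octets hold th_sport at offset 0, th_dport at offset 2 and th_seq
 * at offset 4; th_ack at offset 8 may already be truncated away,
 * hence the pull of exactly 8 bytes below:
 *
 *	pf_pull_hdr(m, off2, &th, 8, NULL, reason, pd2.af)
 *
 * and nothing past th.th_seq is consulted afterwards.)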
4817 */ 4818 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, 4819 pd2.af)) { 4820 DPFPRINTF(PF_DEBUG_MISC, 4821 ("pf: ICMP error message too short " 4822 "(tcp)\n")); 4823 return (PF_DROP); 4824 } 4825 4826 key.af = pd2.af; 4827 key.proto = IPPROTO_TCP; 4828 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4829 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4830 key.port[pd2.sidx] = th.th_sport; 4831 key.port[pd2.didx] = th.th_dport; 4832 4833 STATE_LOOKUP(kif, &key, direction, *state, m); 4834 4835 if (direction == (*state)->direction) { 4836 src = &(*state)->dst; 4837 dst = &(*state)->src; 4838 } else { 4839 src = &(*state)->src; 4840 dst = &(*state)->dst; 4841 } 4842 4843 if (src->wscale && dst->wscale) 4844 dws = dst->wscale & PF_WSCALE_MASK; 4845 else 4846 dws = 0; 4847 4848 /* Demodulate sequence number */ 4849 seq = ntohl(th.th_seq) - src->seqdiff; 4850 if (src->seqdiff) { 4851 pf_change_a(&th.th_seq, icmpsum, 4852 htonl(seq), 0); 4853 copyback = 1; 4854 } 4855 4856 if (!((*state)->state_flags & PFSTATE_SLOPPY) && 4857 (!SEQ_GEQ(src->seqhi, seq) || 4858 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { 4859 if (pf_status.debug >= PF_DEBUG_MISC) { 4860 kprintf("pf: BAD ICMP %d:%d ", 4861 icmptype, pd->hdr.icmp->icmp_code); 4862 pf_print_host(pd->src, 0, pd->af); 4863 kprintf(" -> "); 4864 pf_print_host(pd->dst, 0, pd->af); 4865 kprintf(" state: "); 4866 pf_print_state(*state); 4867 kprintf(" seq=%u\n", seq); 4868 } 4869 REASON_SET(reason, PFRES_BADSTATE); 4870 return (PF_DROP); 4871 } else { 4872 if (pf_status.debug >= PF_DEBUG_MISC) { 4873 kprintf("pf: OK ICMP %d:%d ", 4874 icmptype, pd->hdr.icmp->icmp_code); 4875 pf_print_host(pd->src, 0, pd->af); 4876 kprintf(" -> "); 4877 pf_print_host(pd->dst, 0, pd->af); 4878 kprintf(" state: "); 4879 pf_print_state(*state); 4880 kprintf(" seq=%u\n", seq); 4881 } 4882 } 4883 4884 /* translate source/destination address, if necessary */ 4885 if ((*state)->key[PF_SK_WIRE] != 4886 (*state)->key[PF_SK_STACK]) { 4887 struct pf_state_key *nk = 4888 (*state)->key[pd->didx]; 4889 4890 if (PF_ANEQ(pd2.src, 4891 &nk->addr[pd2.sidx], pd2.af) || 4892 nk->port[pd2.sidx] != th.th_sport) 4893 pf_change_icmp(pd2.src, &th.th_sport, 4894 daddr, &nk->addr[pd2.sidx], 4895 nk->port[pd2.sidx], NULL, 4896 pd2.ip_sum, icmpsum, 4897 pd->ip_sum, 0, pd2.af); 4898 4899 if (PF_ANEQ(pd2.dst, 4900 &nk->addr[pd2.didx], pd2.af) || 4901 nk->port[pd2.didx] != th.th_dport) 4902 pf_change_icmp(pd2.dst, &th.th_dport, 4903 NULL, /* XXX Inbound NAT? 
*/ 4904 &nk->addr[pd2.didx], 4905 nk->port[pd2.didx], NULL, 4906 pd2.ip_sum, icmpsum, 4907 pd->ip_sum, 0, pd2.af); 4908 copyback = 1; 4909 } 4910 4911 if (copyback) { 4912 switch (pd2.af) { 4913 #ifdef INET 4914 case AF_INET: 4915 m_copyback(m, off, ICMP_MINLEN, 4916 (caddr_t)pd->hdr.icmp); 4917 m_copyback(m, ipoff2, sizeof(h2), 4918 (caddr_t)&h2); 4919 break; 4920 #endif /* INET */ 4921 #ifdef INET6 4922 case AF_INET6: 4923 m_copyback(m, off, 4924 sizeof(struct icmp6_hdr), 4925 (caddr_t)pd->hdr.icmp6); 4926 m_copyback(m, ipoff2, sizeof(h2_6), 4927 (caddr_t)&h2_6); 4928 break; 4929 #endif /* INET6 */ 4930 } 4931 m_copyback(m, off2, 8, (caddr_t)&th); 4932 } 4933 4934 return (PF_PASS); 4935 break; 4936 } 4937 case IPPROTO_UDP: { 4938 struct udphdr uh; 4939 4940 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 4941 NULL, reason, pd2.af)) { 4942 DPFPRINTF(PF_DEBUG_MISC, 4943 ("pf: ICMP error message too short " 4944 "(udp)\n")); 4945 return (PF_DROP); 4946 } 4947 4948 key.af = pd2.af; 4949 key.proto = IPPROTO_UDP; 4950 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4951 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4952 key.port[pd2.sidx] = uh.uh_sport; 4953 key.port[pd2.didx] = uh.uh_dport; 4954 4955 STATE_LOOKUP(kif, &key, direction, *state, m); 4956 4957 /* translate source/destination address, if necessary */ 4958 if ((*state)->key[PF_SK_WIRE] != 4959 (*state)->key[PF_SK_STACK]) { 4960 struct pf_state_key *nk = 4961 (*state)->key[pd->didx]; 4962 4963 if (PF_ANEQ(pd2.src, 4964 &nk->addr[pd2.sidx], pd2.af) || 4965 nk->port[pd2.sidx] != uh.uh_sport) 4966 pf_change_icmp(pd2.src, &uh.uh_sport, 4967 daddr, &nk->addr[pd2.sidx], 4968 nk->port[pd2.sidx], &uh.uh_sum, 4969 pd2.ip_sum, icmpsum, 4970 pd->ip_sum, 1, pd2.af); 4971 4972 if (PF_ANEQ(pd2.dst, 4973 &nk->addr[pd2.didx], pd2.af) || 4974 nk->port[pd2.didx] != uh.uh_dport) 4975 pf_change_icmp(pd2.dst, &uh.uh_dport, 4976 NULL, /* XXX Inbound NAT? 
*/ 4977 &nk->addr[pd2.didx], 4978 nk->port[pd2.didx], &uh.uh_sum, 4979 pd2.ip_sum, icmpsum, 4980 pd->ip_sum, 1, pd2.af); 4981 4982 switch (pd2.af) { 4983 #ifdef INET 4984 case AF_INET: 4985 m_copyback(m, off, ICMP_MINLEN, 4986 (caddr_t)pd->hdr.icmp); 4987 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4988 break; 4989 #endif /* INET */ 4990 #ifdef INET6 4991 case AF_INET6: 4992 m_copyback(m, off, 4993 sizeof(struct icmp6_hdr), 4994 (caddr_t)pd->hdr.icmp6); 4995 m_copyback(m, ipoff2, sizeof(h2_6), 4996 (caddr_t)&h2_6); 4997 break; 4998 #endif /* INET6 */ 4999 } 5000 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh); 5001 } 5002 5003 return (PF_PASS); 5004 break; 5005 } 5006 #ifdef INET 5007 case IPPROTO_ICMP: { 5008 struct icmp iih; 5009 5010 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 5011 NULL, reason, pd2.af)) { 5012 DPFPRINTF(PF_DEBUG_MISC, 5013 ("pf: ICMP error message too short " 5014 "(icmp)\n")); 5015 return (PF_DROP); 5016 } 5017 5018 key.af = pd2.af; 5019 key.proto = IPPROTO_ICMP; 5020 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5021 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5022 key.port[0] = key.port[1] = iih.icmp_id; 5023 5024 STATE_LOOKUP(kif, &key, direction, *state, m); 5025 5026 /* translate source/destination address, if necessary */ 5027 if ((*state)->key[PF_SK_WIRE] != 5028 (*state)->key[PF_SK_STACK]) { 5029 struct pf_state_key *nk = 5030 (*state)->key[pd->didx]; 5031 5032 if (PF_ANEQ(pd2.src, 5033 &nk->addr[pd2.sidx], pd2.af) || 5034 nk->port[pd2.sidx] != iih.icmp_id) 5035 pf_change_icmp(pd2.src, &iih.icmp_id, 5036 daddr, &nk->addr[pd2.sidx], 5037 nk->port[pd2.sidx], NULL, 5038 pd2.ip_sum, icmpsum, 5039 pd->ip_sum, 0, AF_INET); 5040 5041 if (PF_ANEQ(pd2.dst, 5042 &nk->addr[pd2.didx], pd2.af) || 5043 nk->port[pd2.didx] != iih.icmp_id) 5044 pf_change_icmp(pd2.dst, &iih.icmp_id, 5045 NULL, /* XXX Inbound NAT? */ 5046 &nk->addr[pd2.didx], 5047 nk->port[pd2.didx], NULL, 5048 pd2.ip_sum, icmpsum, 5049 pd->ip_sum, 0, AF_INET); 5050 5051 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 5052 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5053 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih); 5054 } 5055 return (PF_PASS); 5056 break; 5057 } 5058 #endif /* INET */ 5059 #ifdef INET6 5060 case IPPROTO_ICMPV6: { 5061 struct icmp6_hdr iih; 5062 5063 if (!pf_pull_hdr(m, off2, &iih, 5064 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { 5065 DPFPRINTF(PF_DEBUG_MISC, 5066 ("pf: ICMP error message too short " 5067 "(icmp6)\n")); 5068 return (PF_DROP); 5069 } 5070 5071 key.af = pd2.af; 5072 key.proto = IPPROTO_ICMPV6; 5073 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5074 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5075 key.port[0] = key.port[1] = iih.icmp6_id; 5076 5077 STATE_LOOKUP(kif, &key, direction, *state, m); 5078 5079 /* translate source/destination address, if necessary */ 5080 if ((*state)->key[PF_SK_WIRE] != 5081 (*state)->key[PF_SK_STACK]) { 5082 struct pf_state_key *nk = 5083 (*state)->key[pd->didx]; 5084 5085 if (PF_ANEQ(pd2.src, 5086 &nk->addr[pd2.sidx], pd2.af) || 5087 nk->port[pd2.sidx] != iih.icmp6_id) 5088 pf_change_icmp(pd2.src, &iih.icmp6_id, 5089 daddr, &nk->addr[pd2.sidx], 5090 nk->port[pd2.sidx], NULL, 5091 pd2.ip_sum, icmpsum, 5092 pd->ip_sum, 0, AF_INET6); 5093 5094 if (PF_ANEQ(pd2.dst, 5095 &nk->addr[pd2.didx], pd2.af) || 5096 nk->port[pd2.didx] != iih.icmp6_id) 5097 pf_change_icmp(pd2.dst, &iih.icmp6_id, 5098 NULL, /* XXX Inbound NAT? 
*/ 5099 &nk->addr[pd2.didx], 5100 nk->port[pd2.didx], NULL, 5101 pd2.ip_sum, icmpsum, 5102 pd->ip_sum, 0, AF_INET6); 5103 5104 m_copyback(m, off, sizeof(struct icmp6_hdr), 5105 (caddr_t)pd->hdr.icmp6); 5106 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); 5107 m_copyback(m, off2, sizeof(struct icmp6_hdr), 5108 (caddr_t)&iih); 5109 } 5110 5111 return (PF_PASS); 5112 break; 5113 } 5114 #endif /* INET6 */ 5115 default: { 5116 key.af = pd2.af; 5117 key.proto = pd2.proto; 5118 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5119 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5120 key.port[0] = key.port[1] = 0; 5121 5122 STATE_LOOKUP(kif, &key, direction, *state, m); 5123 5124 /* translate source/destination address, if necessary */ 5125 if ((*state)->key[PF_SK_WIRE] != 5126 (*state)->key[PF_SK_STACK]) { 5127 struct pf_state_key *nk = 5128 (*state)->key[pd->didx]; 5129 5130 if (PF_ANEQ(pd2.src, 5131 &nk->addr[pd2.sidx], pd2.af)) 5132 pf_change_icmp(pd2.src, NULL, daddr, 5133 &nk->addr[pd2.sidx], 0, NULL, 5134 pd2.ip_sum, icmpsum, 5135 pd->ip_sum, 0, pd2.af); 5136 5137 if (PF_ANEQ(pd2.dst, 5138 &nk->addr[pd2.didx], pd2.af)) 5139 pf_change_icmp(pd2.dst, NULL, 5140 NULL, /* XXX Inbound NAT? */ 5141 &nk->addr[pd2.didx], 0, NULL, 5142 pd2.ip_sum, icmpsum, 5143 pd->ip_sum, 0, pd2.af); 5144 5145 switch (pd2.af) { 5146 #ifdef INET 5147 case AF_INET: 5148 m_copyback(m, off, ICMP_MINLEN, 5149 (caddr_t)pd->hdr.icmp); 5150 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5151 break; 5152 #endif /* INET */ 5153 #ifdef INET6 5154 case AF_INET6: 5155 m_copyback(m, off, 5156 sizeof(struct icmp6_hdr), 5157 (caddr_t)pd->hdr.icmp6); 5158 m_copyback(m, ipoff2, sizeof(h2_6), 5159 (caddr_t)&h2_6); 5160 break; 5161 #endif /* INET6 */ 5162 } 5163 } 5164 return (PF_PASS); 5165 break; 5166 } 5167 } 5168 } 5169 } 5170 5171 int 5172 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 5173 struct mbuf *m, struct pf_pdesc *pd) 5174 { 5175 struct pf_state_peer *src, *dst; 5176 struct pf_state_key_cmp key; 5177 5178 key.af = pd->af; 5179 key.proto = pd->proto; 5180 if (direction == PF_IN) { 5181 PF_ACPY(&key.addr[0], pd->src, key.af); 5182 PF_ACPY(&key.addr[1], pd->dst, key.af); 5183 key.port[0] = key.port[1] = 0; 5184 } else { 5185 PF_ACPY(&key.addr[1], pd->src, key.af); 5186 PF_ACPY(&key.addr[0], pd->dst, key.af); 5187 key.port[1] = key.port[0] = 0; 5188 } 5189 5190 STATE_LOOKUP(kif, &key, direction, *state, m); 5191 5192 if (direction == (*state)->direction) { 5193 src = &(*state)->src; 5194 dst = &(*state)->dst; 5195 } else { 5196 src = &(*state)->dst; 5197 dst = &(*state)->src; 5198 } 5199 5200 /* update states */ 5201 if (src->state < PFOTHERS_SINGLE) 5202 src->state = PFOTHERS_SINGLE; 5203 if (dst->state == PFOTHERS_SINGLE) 5204 dst->state = PFOTHERS_MULTIPLE; 5205 5206 /* update expire time */ 5207 (*state)->expire = time_second; 5208 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) 5209 (*state)->timeout = PFTM_OTHER_MULTIPLE; 5210 else 5211 (*state)->timeout = PFTM_OTHER_SINGLE; 5212 5213 /* translate source/destination address, if necessary */ 5214 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 5215 struct pf_state_key *nk = (*state)->key[pd->didx]; 5216 5217 KKASSERT(nk); 5218 KKASSERT(pd); 5219 KKASSERT(pd->src); 5220 KKASSERT(pd->dst); 5221 switch (pd->af) { 5222 #ifdef INET 5223 case AF_INET: 5224 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) 5225 pf_change_a(&pd->src->v4.s_addr, 5226 pd->ip_sum, 5227 nk->addr[pd->sidx].v4.s_addr, 5228 
0); 5229 5230 5231 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) 5232 pf_change_a(&pd->dst->v4.s_addr, 5233 pd->ip_sum, 5234 nk->addr[pd->didx].v4.s_addr, 5235 0); 5236 5237 break; 5238 #endif /* INET */ 5239 #ifdef INET6 5240 case AF_INET6: 5241 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6)) 5242 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); 5243 5244 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6)) 5245 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); 5246 #endif /* INET6 */ 5247 } 5248 } 5249 return (PF_PASS); 5250 } 5251 5252 /* 5253 * ipoff and off are measured from the start of the mbuf chain. 5254 * h must be at "ipoff" on the mbuf chain. 5255 */ 5256 void * 5257 pf_pull_hdr(struct mbuf *m, int off, void *p, int len, 5258 u_short *actionp, u_short *reasonp, sa_family_t af) 5259 { 5260 switch (af) { 5261 #ifdef INET 5262 case AF_INET: { 5263 struct ip *h = mtod(m, struct ip *); 5264 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3; 5265 5266 if (fragoff) { 5267 if (fragoff >= len) 5268 ACTION_SET(actionp, PF_PASS); 5269 else { 5270 ACTION_SET(actionp, PF_DROP); 5271 REASON_SET(reasonp, PFRES_FRAG); 5272 } 5273 return (NULL); 5274 } 5275 if (m->m_pkthdr.len < off + len || 5276 h->ip_len < off + len) { 5277 ACTION_SET(actionp, PF_DROP); 5278 REASON_SET(reasonp, PFRES_SHORT); 5279 return (NULL); 5280 } 5281 break; 5282 } 5283 #endif /* INET */ 5284 #ifdef INET6 5285 case AF_INET6: { 5286 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 5287 5288 if (m->m_pkthdr.len < off + len || 5289 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < 5290 (unsigned)(off + len)) { 5291 ACTION_SET(actionp, PF_DROP); 5292 REASON_SET(reasonp, PFRES_SHORT); 5293 return (NULL); 5294 } 5295 break; 5296 } 5297 #endif /* INET6 */ 5298 } 5299 m_copydata(m, off, len, p); 5300 return (p); 5301 } 5302 5303 int 5304 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif) 5305 { 5306 struct sockaddr_in *dst; 5307 int ret = 1; 5308 int check_mpath; 5309 #ifdef INET6 5310 struct sockaddr_in6 *dst6; 5311 struct route_in6 ro; 5312 #else 5313 struct route ro; 5314 #endif 5315 struct radix_node *rn; 5316 struct rtentry *rt; 5317 struct ifnet *ifp; 5318 5319 check_mpath = 0; 5320 bzero(&ro, sizeof(ro)); 5321 switch (af) { 5322 case AF_INET: 5323 dst = satosin(&ro.ro_dst); 5324 dst->sin_family = AF_INET; 5325 dst->sin_len = sizeof(*dst); 5326 dst->sin_addr = addr->v4; 5327 break; 5328 #ifdef INET6 5329 case AF_INET6: 5330 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5331 dst6->sin6_family = AF_INET6; 5332 dst6->sin6_len = sizeof(*dst6); 5333 dst6->sin6_addr = addr->v6; 5334 break; 5335 #endif /* INET6 */ 5336 default: 5337 return (0); 5338 } 5339 5340 /* Skip checks for ipsec interfaces */ 5341 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) 5342 goto out; 5343 5344 rtalloc_ign((struct route *)&ro, 0); 5345 5346 if (ro.ro_rt != NULL) { 5347 /* No interface given, this is a no-route check */ 5348 if (kif == NULL) 5349 goto out; 5350 5351 if (kif->pfik_ifp == NULL) { 5352 ret = 0; 5353 goto out; 5354 } 5355 5356 /* Perform uRPF check if passed input interface */ 5357 ret = 0; 5358 rn = (struct radix_node *)ro.ro_rt; 5359 do { 5360 rt = (struct rtentry *)rn; 5361 ifp = rt->rt_ifp; 5362 5363 if (kif->pfik_ifp == ifp) 5364 ret = 1; 5365 rn = NULL; 5366 } while (check_mpath == 1 && rn != NULL && ret == 0); 5367 } else 5368 ret = 0; 5369 out: 5370 if (ro.ro_rt != NULL) 5371 RTFREE(ro.ro_rt); 5372 return (ret); 5373 } 5374 5375 int 5376 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct
pf_addr_wrap *aw) 5377 { 5378 struct sockaddr_in *dst; 5379 #ifdef INET6 5380 struct sockaddr_in6 *dst6; 5381 struct route_in6 ro; 5382 #else 5383 struct route ro; 5384 #endif 5385 int ret = 0; 5386 5387 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5388 5389 bzero(&ro, sizeof(ro)); 5390 switch (af) { 5391 case AF_INET: 5392 dst = satosin(&ro.ro_dst); 5393 dst->sin_family = AF_INET; 5394 dst->sin_len = sizeof(*dst); 5395 dst->sin_addr = addr->v4; 5396 break; 5397 #ifdef INET6 5398 case AF_INET6: 5399 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5400 dst6->sin6_family = AF_INET6; 5401 dst6->sin6_len = sizeof(*dst6); 5402 dst6->sin6_addr = addr->v6; 5403 break; 5404 #endif /* INET6 */ 5405 default: 5406 return (0); 5407 } 5408 5409 rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING)); 5410 5411 if (ro.ro_rt != NULL) { 5412 RTFREE(ro.ro_rt); 5413 } 5414 5415 return (ret); 5416 } 5417 5418 #ifdef INET 5419 void 5420 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5421 struct pf_state *s, struct pf_pdesc *pd) 5422 { 5423 struct mbuf *m0, *m1; 5424 struct route iproute; 5425 struct route *ro = NULL; 5426 struct sockaddr_in *dst; 5427 struct ip *ip; 5428 struct ifnet *ifp = NULL; 5429 struct pf_addr naddr; 5430 struct pf_src_node *sn = NULL; 5431 int error = 0; 5432 int sw_csum; 5433 #ifdef IPSEC 5434 struct m_tag *mtag; 5435 #endif /* IPSEC */ 5436 5437 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5438 5439 if (m == NULL || *m == NULL || r == NULL || 5440 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5441 panic("pf_route: invalid parameters"); 5442 5443 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5444 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5445 (*m)->m_pkthdr.pf.routed = 1; 5446 } else { 5447 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5448 m0 = *m; 5449 *m = NULL; 5450 goto bad; 5451 } 5452 } 5453 5454 if (r->rt == PF_DUPTO) { 5455 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) { 5456 return; 5457 } 5458 } else { 5459 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5460 return; 5461 } 5462 m0 = *m; 5463 } 5464 5465 if (m0->m_len < sizeof(struct ip)) { 5466 DPFPRINTF(PF_DEBUG_URGENT, 5467 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5468 goto bad; 5469 } 5470 5471 ip = mtod(m0, struct ip *); 5472 5473 ro = &iproute; 5474 bzero((caddr_t)ro, sizeof(*ro)); 5475 dst = satosin(&ro->ro_dst); 5476 dst->sin_family = AF_INET; 5477 dst->sin_len = sizeof(*dst); 5478 dst->sin_addr = ip->ip_dst; 5479 5480 if (r->rt == PF_FASTROUTE) { 5481 rtalloc(ro); 5482 if (ro->ro_rt == 0) { 5483 ipstat.ips_noroute++; 5484 goto bad; 5485 } 5486 5487 ifp = ro->ro_rt->rt_ifp; 5488 ro->ro_rt->rt_use++; 5489 5490 if (ro->ro_rt->rt_flags & RTF_GATEWAY) 5491 dst = satosin(ro->ro_rt->rt_gateway); 5492 } else { 5493 if (TAILQ_EMPTY(&r->rpool.list)) { 5494 DPFPRINTF(PF_DEBUG_URGENT, 5495 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n")); 5496 goto bad; 5497 } 5498 if (s == NULL) { 5499 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, 5500 &naddr, NULL, &sn); 5501 if (!PF_AZERO(&naddr, AF_INET)) 5502 dst->sin_addr.s_addr = naddr.v4.s_addr; 5503 ifp = r->rpool.cur->kif ? 5504 r->rpool.cur->kif->pfik_ifp : NULL; 5505 } else { 5506 if (!PF_AZERO(&s->rt_addr, AF_INET)) 5507 dst->sin_addr.s_addr = 5508 s->rt_addr.v4.s_addr; 5509 ifp = s->rt_kif ? 
s->rt_kif->pfik_ifp : NULL; 5510 } 5511 } 5512 if (ifp == NULL) 5513 goto bad; 5514 5515 if (oifp != ifp) { 5516 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5517 goto bad; 5518 } else if (m0 == NULL) { 5519 goto done; 5520 } 5521 if (m0->m_len < sizeof(struct ip)) { 5522 DPFPRINTF(PF_DEBUG_URGENT, 5523 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5524 goto bad; 5525 } 5526 ip = mtod(m0, struct ip *); 5527 } 5528 5529 /* Copied from FreeBSD 5.1-CURRENT ip_output. */ 5530 m0->m_pkthdr.csum_flags |= CSUM_IP; 5531 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist; 5532 if (sw_csum & CSUM_DELAY_DATA) { 5533 in_delayed_cksum(m0); 5534 sw_csum &= ~CSUM_DELAY_DATA; 5535 } 5536 m0->m_pkthdr.csum_flags &= ifp->if_hwassist; 5537 5538 if (ip->ip_len <= ifp->if_mtu || 5539 (ifp->if_hwassist & CSUM_FRAGMENT && 5540 (ip->ip_off & IP_DF) == 0)) { 5541 ip->ip_len = htons(ip->ip_len); 5542 ip->ip_off = htons(ip->ip_off); 5543 ip->ip_sum = 0; 5544 if (sw_csum & CSUM_DELAY_IP) { 5545 /* From KAME */ 5546 if (ip->ip_v == IPVERSION && 5547 (ip->ip_hl << 2) == sizeof(*ip)) { 5548 ip->ip_sum = in_cksum_hdr(ip); 5549 } else { 5550 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); 5551 } 5552 } 5553 lwkt_reltoken(&pf_token); 5554 error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt); 5555 lwkt_gettoken(&pf_token); 5556 goto done; 5557 } 5558 5559 /* 5560 * Too large for interface; fragment if possible. 5561 * Must be able to put at least 8 bytes per fragment. 5562 */ 5563 if (ip->ip_off & IP_DF) { 5564 ipstat.ips_cantfrag++; 5565 if (r->rt != PF_DUPTO) { 5566 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, 5567 ifp->if_mtu); 5568 goto done; 5569 } else 5570 goto bad; 5571 } 5572 5573 m1 = m0; 5574 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum); 5575 if (error) { 5576 goto bad; 5577 } 5578 5579 for (m0 = m1; m0; m0 = m1) { 5580 m1 = m0->m_nextpkt; 5581 m0->m_nextpkt = 0; 5582 if (error == 0) { 5583 lwkt_reltoken(&pf_token); 5584 error = (*ifp->if_output)(ifp, m0, sintosa(dst), 5585 NULL); 5586 lwkt_gettoken(&pf_token); 5587 } else 5588 m_freem(m0); 5589 } 5590 5591 if (error == 0) 5592 ipstat.ips_fragmented++; 5593 5594 done: 5595 if (r->rt != PF_DUPTO) 5596 *m = NULL; 5597 if (ro == &iproute && ro->ro_rt) 5598 RTFREE(ro->ro_rt); 5599 return; 5600 5601 bad: 5602 m_freem(m0); 5603 goto done; 5604 } 5605 #endif /* INET */ 5606 5607 #ifdef INET6 5608 void 5609 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5610 struct pf_state *s, struct pf_pdesc *pd) 5611 { 5612 struct mbuf *m0; 5613 struct route_in6 ip6route; 5614 struct route_in6 *ro; 5615 struct sockaddr_in6 *dst; 5616 struct ip6_hdr *ip6; 5617 struct ifnet *ifp = NULL; 5618 struct pf_addr naddr; 5619 struct pf_src_node *sn = NULL; 5620 int error = 0; 5621 5622 if (m == NULL || *m == NULL || r == NULL || 5623 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5624 panic("pf_route6: invalid parameters"); 5625 5626 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5627 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5628 (*m)->m_pkthdr.pf.routed = 1; 5629 } else { 5630 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5631 m0 = *m; 5632 *m = NULL; 5633 goto bad; 5634 } 5635 } 5636 5637 if (r->rt == PF_DUPTO) { 5638 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) 5639 return; 5640 } else { 5641 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) 5642 return; 5643 m0 = *m; 5644 } 5645 5646 if (m0->m_len < sizeof(struct ip6_hdr)) { 5647 DPFPRINTF(PF_DEBUG_URGENT, 5648 ("pf_route6: m0->m_len < sizeof(struct 
ip6_hdr)\n")); 5649 goto bad; 5650 } 5651 ip6 = mtod(m0, struct ip6_hdr *); 5652 5653 ro = &ip6route; 5654 bzero((caddr_t)ro, sizeof(*ro)); 5655 dst = (struct sockaddr_in6 *)&ro->ro_dst; 5656 dst->sin6_family = AF_INET6; 5657 dst->sin6_len = sizeof(*dst); 5658 dst->sin6_addr = ip6->ip6_dst; 5659 5660 /* 5661 * DragonFly doesn't zero the auxillary pkghdr fields, only fw_flags, 5662 * so make sure pf.flags is clear. 5663 * 5664 * Cheat. XXX why only in the v6 case??? 5665 */ 5666 if (r->rt == PF_FASTROUTE) { 5667 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 5668 m0->m_pkthdr.pf.flags = 0; 5669 /* XXX Re-Check when Upgrading to > 4.4 */ 5670 m0->m_pkthdr.pf.statekey = NULL; 5671 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 5672 return; 5673 } 5674 5675 if (TAILQ_EMPTY(&r->rpool.list)) { 5676 DPFPRINTF(PF_DEBUG_URGENT, 5677 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n")); 5678 goto bad; 5679 } 5680 if (s == NULL) { 5681 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src, 5682 &naddr, NULL, &sn); 5683 if (!PF_AZERO(&naddr, AF_INET6)) 5684 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 5685 &naddr, AF_INET6); 5686 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; 5687 } else { 5688 if (!PF_AZERO(&s->rt_addr, AF_INET6)) 5689 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 5690 &s->rt_addr, AF_INET6); 5691 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 5692 } 5693 if (ifp == NULL) 5694 goto bad; 5695 5696 if (oifp != ifp) { 5697 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5698 goto bad; 5699 } else if (m0 == NULL) { 5700 goto done; 5701 } 5702 if (m0->m_len < sizeof(struct ip6_hdr)) { 5703 DPFPRINTF(PF_DEBUG_URGENT, 5704 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n")); 5705 goto bad; 5706 } 5707 ip6 = mtod(m0, struct ip6_hdr *); 5708 } 5709 5710 /* 5711 * If the packet is too large for the outgoing interface, 5712 * send back an icmp6 error. 5713 */ 5714 if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr)) 5715 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index); 5716 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) { 5717 error = nd6_output(ifp, ifp, m0, dst, NULL); 5718 } else { 5719 in6_ifstat_inc(ifp, ifs6_in_toobig); 5720 if (r->rt != PF_DUPTO) 5721 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); 5722 else 5723 goto bad; 5724 } 5725 5726 done: 5727 if (r->rt != PF_DUPTO) 5728 *m = NULL; 5729 return; 5730 5731 bad: 5732 m_freem(m0); 5733 goto done; 5734 } 5735 #endif /* INET6 */ 5736 5737 5738 /* 5739 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag 5740 * off is the offset where the protocol header starts 5741 * len is the total length of protocol header plus payload 5742 * returns 0 when the checksum is valid, otherwise returns 1. 5743 */ 5744 /* 5745 * XXX 5746 * FreeBSD supports cksum offload for the following drivers. 5747 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4) 5748 * If we can make full use of it we would outperform ipfw/ipfilter in 5749 * very heavy traffic. 5750 * I have not tested 'cause I don't have NICs that supports cksum offload. 5751 * (There might be problems. Typical phenomena would be 5752 * 1. No route message for UDP packet. 5753 * 2. No connection acceptance from external hosts regardless of rule set.) 
5754 */ 5755 int 5756 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, 5757 sa_family_t af) 5758 { 5759 u_int16_t sum = 0; 5760 int hw_assist = 0; 5761 struct ip *ip; 5762 5763 if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) 5764 return (1); 5765 if (m->m_pkthdr.len < off + len) 5766 return (1); 5767 5768 switch (p) { 5769 case IPPROTO_TCP: 5770 case IPPROTO_UDP: 5771 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 5772 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 5773 sum = m->m_pkthdr.csum_data; 5774 } else { 5775 ip = mtod(m, struct ip *); 5776 sum = in_pseudo(ip->ip_src.s_addr, 5777 ip->ip_dst.s_addr, htonl((u_short)len + 5778 m->m_pkthdr.csum_data + p)); 5779 } 5780 sum ^= 0xffff; 5781 ++hw_assist; 5782 } 5783 break; 5784 case IPPROTO_ICMP: 5785 #ifdef INET6 5786 case IPPROTO_ICMPV6: 5787 #endif /* INET6 */ 5788 break; 5789 default: 5790 return (1); 5791 } 5792 5793 if (!hw_assist) { 5794 switch (af) { 5795 case AF_INET: 5796 if (p == IPPROTO_ICMP) { 5797 if (m->m_len < off) 5798 return (1); 5799 m->m_data += off; 5800 m->m_len -= off; 5801 sum = in_cksum(m, len); 5802 m->m_data -= off; 5803 m->m_len += off; 5804 } else { 5805 if (m->m_len < sizeof(struct ip)) 5806 return (1); 5807 sum = in_cksum_range(m, p, off, len); 5808 if (sum == 0) { 5809 m->m_pkthdr.csum_flags |= 5810 (CSUM_DATA_VALID | 5811 CSUM_PSEUDO_HDR); 5812 m->m_pkthdr.csum_data = 0xffff; 5813 } 5814 } 5815 break; 5816 #ifdef INET6 5817 case AF_INET6: 5818 if (m->m_len < sizeof(struct ip6_hdr)) 5819 return (1); 5820 sum = in6_cksum(m, p, off, len); 5821 /* 5822 * XXX 5823 * IPv6 H/W cksum off-load not supported yet! 5824 * 5825 * if (sum == 0) { 5826 * m->m_pkthdr.csum_flags |= 5827 * (CSUM_DATA_VALID|CSUM_PSEUDO_HDR); 5828 * m->m_pkthdr.csum_data = 0xffff; 5829 *} 5830 */ 5831 break; 5832 #endif /* INET6 */ 5833 default: 5834 return (1); 5835 } 5836 } 5837 if (sum) { 5838 switch (p) { 5839 case IPPROTO_TCP: 5840 tcpstat.tcps_rcvbadsum++; 5841 break; 5842 case IPPROTO_UDP: 5843 udpstat.udps_badsum++; 5844 break; 5845 case IPPROTO_ICMP: 5846 icmpstat.icps_checksum++; 5847 break; 5848 #ifdef INET6 5849 case IPPROTO_ICMPV6: 5850 icmp6stat.icp6s_checksum++; 5851 break; 5852 #endif /* INET6 */ 5853 } 5854 return (1); 5855 } 5856 return (0); 5857 } 5858 5859 struct pf_divert * 5860 pf_find_divert(struct mbuf *m) 5861 { 5862 struct m_tag *mtag; 5863 5864 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) 5865 return (NULL); 5866 5867 return ((struct pf_divert *)(mtag + 1)); 5868 } 5869 5870 struct pf_divert * 5871 pf_get_divert(struct mbuf *m) 5872 { 5873 struct m_tag *mtag; 5874 5875 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) { 5876 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert), 5877 M_NOWAIT); 5878 if (mtag == NULL) 5879 return (NULL); 5880 bzero(mtag + 1, sizeof(struct pf_divert)); 5881 m_tag_prepend(m, mtag); 5882 } 5883 5884 return ((struct pf_divert *)(mtag + 1)); 5885 } 5886 5887 #ifdef INET 5888 int 5889 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, 5890 struct ether_header *eh, struct inpcb *inp) 5891 { 5892 struct pfi_kif *kif; 5893 u_short action, reason = 0, log = 0; 5894 struct mbuf *m = *m0; 5895 struct ip *h = NULL; 5896 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; 5897 struct pf_state *s = NULL; 5898 struct pf_ruleset *ruleset = NULL; 5899 struct pf_pdesc pd; 5900 int off, dirndx, pqid = 0; 5901 5902 if (!pf_status.running) 5903 return (PF_PASS); 5904 5905 memset(&pd, 0, sizeof(pd)); 5906 if (ifp->if_type 
== IFT_CARP && ifp->if_carpdev) 5907 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif; 5908 else 5909 kif = (struct pfi_kif *)ifp->if_pf_kif; 5910 5911 if (kif == NULL) { 5912 DPFPRINTF(PF_DEBUG_URGENT, 5913 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); 5914 return (PF_DROP); 5915 } 5916 if (kif->pfik_flags & PFI_IFLAG_SKIP) 5917 return (PF_PASS); 5918 5919 #ifdef DIAGNOSTIC 5920 if ((m->m_flags & M_PKTHDR) == 0) 5921 panic("non-M_PKTHDR is passed to pf_test"); 5922 #endif /* DIAGNOSTIC */ 5923 5924 if (m->m_pkthdr.len < (int)sizeof(*h)) { 5925 action = PF_DROP; 5926 REASON_SET(&reason, PFRES_SHORT); 5927 log = 1; 5928 goto done; 5929 } 5930 5931 /* 5932 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags, 5933 * so make sure pf.flags is clear. 5934 */ 5935 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED) 5936 return (PF_PASS); 5937 m->m_pkthdr.pf.flags = 0; 5938 /* Re-Check when updating to > 4.4 */ 5939 m->m_pkthdr.pf.statekey = NULL; 5940 5941 /* We do IP header normalization and packet reassembly here */ 5942 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) { 5943 action = PF_DROP; 5944 goto done; 5945 } 5946 m = *m0; /* pf_normalize messes with m0 */ 5947 h = mtod(m, struct ip *); 5948 5949 off = h->ip_hl << 2; 5950 if (off < (int)sizeof(*h)) { 5951 action = PF_DROP; 5952 REASON_SET(&reason, PFRES_SHORT); 5953 log = 1; 5954 goto done; 5955 } 5956 5957 pd.src = (struct pf_addr *)&h->ip_src; 5958 pd.dst = (struct pf_addr *)&h->ip_dst; 5959 pd.sport = pd.dport = NULL; 5960 pd.ip_sum = &h->ip_sum; 5961 pd.proto_sum = NULL; 5962 pd.proto = h->ip_p; 5963 pd.dir = dir; 5964 pd.sidx = (dir == PF_IN) ? 0 : 1; 5965 pd.didx = (dir == PF_IN) ? 1 : 0; 5966 pd.af = AF_INET; 5967 pd.tos = h->ip_tos; 5968 pd.tot_len = h->ip_len; 5969 pd.eh = eh; 5970 5971 /* handle fragments that didn't get reassembled by normalization */ 5972 if (h->ip_off & (IP_MF | IP_OFFMASK)) { 5973 action = pf_test_fragment(&r, dir, kif, m, h, 5974 &pd, &a, &ruleset); 5975 goto done; 5976 } 5977 5978 switch (h->ip_p) { 5979 5980 case IPPROTO_TCP: { 5981 struct tcphdr th; 5982 5983 pd.hdr.tcp = &th; 5984 if (!pf_pull_hdr(m, off, &th, sizeof(th), 5985 &action, &reason, AF_INET)) { 5986 log = action != PF_PASS; 5987 goto done; 5988 } 5989 pd.p_len = pd.tot_len - off - (th.th_off << 2); 5990 if ((th.th_flags & TH_ACK) && pd.p_len == 0) 5991 pqid = 1; 5992 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 5993 if (action == PF_DROP) 5994 goto done; 5995 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 5996 &reason); 5997 if (action == PF_PASS) { 5998 pfsync_update_state(s); 5999 r = s->rule.ptr; 6000 a = s->anchor.ptr; 6001 log = s->log; 6002 } else if (s == NULL) 6003 action = pf_test_rule(&r, &s, dir, kif, 6004 m, off, h, &pd, &a, &ruleset, NULL, inp); 6005 break; 6006 } 6007 6008 case IPPROTO_UDP: { 6009 struct udphdr uh; 6010 6011 pd.hdr.udp = &uh; 6012 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 6013 &action, &reason, AF_INET)) { 6014 log = action != PF_PASS; 6015 goto done; 6016 } 6017 if (uh.uh_dport == 0 || 6018 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 6019 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 6020 action = PF_DROP; 6021 REASON_SET(&reason, PFRES_SHORT); 6022 goto done; 6023 } 6024 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 6025 if (action == PF_PASS) { 6026 pfsync_update_state(s); 6027 r = s->rule.ptr; 6028 a = s->anchor.ptr; 6029 log = s->log; 6030 } else if (s == NULL) 6031 action = pf_test_rule(&r, &s, dir, kif, 6032 m, off, h, &pd, &a, &ruleset, 
NULL, inp); 6033 break; 6034 } 6035 6036 case IPPROTO_ICMP: { 6037 struct icmp ih; 6038 6039 pd.hdr.icmp = &ih; 6040 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN, 6041 &action, &reason, AF_INET)) { 6042 log = action != PF_PASS; 6043 goto done; 6044 } 6045 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, 6046 &reason); 6047 if (action == PF_PASS) { 6048 pfsync_update_state(s); 6049 r = s->rule.ptr; 6050 a = s->anchor.ptr; 6051 log = s->log; 6052 } else if (s == NULL) 6053 action = pf_test_rule(&r, &s, dir, kif, 6054 m, off, h, &pd, &a, &ruleset, NULL, inp); 6055 break; 6056 } 6057 6058 default: 6059 action = pf_test_state_other(&s, dir, kif, m, &pd); 6060 if (action == PF_PASS) { 6061 pfsync_update_state(s); 6062 r = s->rule.ptr; 6063 a = s->anchor.ptr; 6064 log = s->log; 6065 } else if (s == NULL) 6066 action = pf_test_rule(&r, &s, dir, kif, m, off, h, 6067 &pd, &a, &ruleset, NULL, inp); 6068 break; 6069 } 6070 6071 done: 6072 if (action == PF_PASS && h->ip_hl > 5 && 6073 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 6074 action = PF_DROP; 6075 REASON_SET(&reason, PFRES_IPOPTIONS); 6076 log = 1; 6077 DPFPRINTF(PF_DEBUG_MISC, 6078 ("pf: dropping packet with ip options\n")); 6079 } 6080 6081 if ((s && s->tag) || r->rtableid) 6082 pf_tag_packet(m, s ? s->tag : 0, r->rtableid); 6083 6084 #if 0 6085 if (dir == PF_IN && s && s->key[PF_SK_STACK]) 6086 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK]; 6087 #endif 6088 6089 #ifdef ALTQ 6090 if (action == PF_PASS && r->qid) { 6091 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 6092 if (pqid || (pd.tos & IPTOS_LOWDELAY)) 6093 m->m_pkthdr.pf.qid = r->pqid; 6094 else 6095 m->m_pkthdr.pf.qid = r->qid; 6096 m->m_pkthdr.pf.ecn_af = AF_INET; 6097 m->m_pkthdr.pf.hdr = h; 6098 /* add connection hash for fairq */ 6099 if (s) { 6100 /* for fairq */ 6101 m->m_pkthdr.pf.state_hash = s->hash; 6102 m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED; 6103 } 6104 } 6105 #endif /* ALTQ */ 6106 6107 /* 6108 * connections redirected to loopback should not match sockets 6109 * bound specifically to loopback due to security implications, 6110 * see tcp_input() and in_pcblookup_listen(). 
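 *
 * Concretely, IN_LOOPBACKNET is 127 and IN_CLASSA_NSHIFT is 24, so the
 * comparison below selects exactly 127.0.0.0/8; e.g. for a rdr rule
 * translating to 127.0.0.1 (illustrative address):
 *
 *	ntohl(pd.dst->v4.s_addr) >> 24 == 127
 *
 * and the mbuf is tagged PF_TAG_TRANSLATE_LOCALHOST so that
 * tcp_input() and in_pcblookup_listen() can tell such translated
 * connections apart.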
6111 */ 6112 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 6113 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 6114 (s->nat_rule.ptr->action == PF_RDR || 6115 s->nat_rule.ptr->action == PF_BINAT) && 6116 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) 6117 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST; 6118 6119 if (dir == PF_IN && action == PF_PASS && r->divert.port) { 6120 struct pf_divert *divert; 6121 6122 if ((divert = pf_get_divert(m))) { 6123 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED; 6124 divert->port = r->divert.port; 6125 divert->addr.ipv4 = r->divert.addr.v4; 6126 } 6127 } 6128 6129 if (log) { 6130 struct pf_rule *lr; 6131 6132 if (s != NULL && s->nat_rule.ptr != NULL && 6133 s->nat_rule.ptr->log & PF_LOG_ALL) 6134 lr = s->nat_rule.ptr; 6135 else 6136 lr = r; 6137 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset, 6138 &pd); 6139 } 6140 6141 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 6142 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++; 6143 6144 if (action == PF_PASS || r->action == PF_DROP) { 6145 dirndx = (dir == PF_OUT); 6146 r->packets[dirndx]++; 6147 r->bytes[dirndx] += pd.tot_len; 6148 if (a != NULL) { 6149 a->packets[dirndx]++; 6150 a->bytes[dirndx] += pd.tot_len; 6151 } 6152 if (s != NULL) { 6153 if (s->nat_rule.ptr != NULL) { 6154 s->nat_rule.ptr->packets[dirndx]++; 6155 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 6156 } 6157 if (s->src_node != NULL) { 6158 s->src_node->packets[dirndx]++; 6159 s->src_node->bytes[dirndx] += pd.tot_len; 6160 } 6161 if (s->nat_src_node != NULL) { 6162 s->nat_src_node->packets[dirndx]++; 6163 s->nat_src_node->bytes[dirndx] += pd.tot_len; 6164 } 6165 dirndx = (dir == s->direction) ? 0 : 1; 6166 s->packets[dirndx]++; 6167 s->bytes[dirndx] += pd.tot_len; 6168 } 6169 tr = r; 6170 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 6171 if (nr != NULL && r == &pf_default_rule) 6172 tr = nr; 6173 if (tr->src.addr.type == PF_ADDR_TABLE) 6174 pfr_update_stats(tr->src.addr.p.tbl, 6175 (s == NULL) ? pd.src : 6176 &s->key[(s->direction == PF_IN)]-> 6177 addr[(s->direction == PF_OUT)], 6178 pd.af, pd.tot_len, dir == PF_OUT, 6179 r->action == PF_PASS, tr->src.neg); 6180 if (tr->dst.addr.type == PF_ADDR_TABLE) 6181 pfr_update_stats(tr->dst.addr.p.tbl, 6182 (s == NULL) ? 
	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_OUT)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_IN)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route can free the mbuf causing *m0 to become NULL */
		pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET */

#ifdef INET6
int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct inpcb *inp)
{
	struct pfi_kif		*kif;
	u_short			 action, reason = 0, log = 0;
	struct mbuf		*m = *m0, *n = NULL;
	struct ip6_hdr		*h = NULL;
	struct pf_rule		*a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state		*s = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_pdesc		 pd;
	int			 off, terminal = 0, dirndx, rh_cnt = 0;

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
	if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
		kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
	else
		kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test6");
#endif /* DIAGNOSTIC */

	if (m->m_pkthdr.len < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
		return (PF_PASS);
	m->m_pkthdr.pf.flags = 0;
	/* Re-Check when updating to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip6_hdr *);

#if 1
	/*
	 * we do not support jumbograms yet.  if we keep going, zero ip6_plen
	 * will do something bad, so drop the packet for now.
	 */
	if (htons(h->ip6_plen) == 0) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
		goto done;
	}
#endif

	pd.src = (struct pf_addr *)&h->ip6_src;
	pd.dst = (struct pf_addr *)&h->ip6_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = NULL;
	pd.proto_sum = NULL;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET6;
	pd.tos = 0;
	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	pd.eh = eh;

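	/*
	 * Walk the chain of IPv6 extension headers until a terminal
	 * protocol is reached.  Fragments go to pf_test_fragment(),
	 * multiple routing headers and type 0 routing headers are
	 * dropped, and AH/HOPOPTS/DSTOPTS simply advance the offset
	 * to the next header.
	 */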
	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
	pd.proto = h->ip6_nxt;
	do {
		switch (pd.proto) {
		case IPPROTO_FRAGMENT:
			action = pf_test_fragment(&r, dir, kif, m, h,
			    &pd, &a, &ruleset);
			if (action == PF_DROP)
				REASON_SET(&reason, PFRES_FRAG);
			goto done;
		case IPPROTO_ROUTING: {
			struct ip6_rthdr rthdr;

			if (rh_cnt++) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 more than one rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
			    &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				log = 1;
				goto done;
			}
			/* type 0 routing headers are deprecated (RFC 5095) */
			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 rthdr0\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			/* FALLTHROUGH */
		}
		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext	opt6;

			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH)
				off += (opt6.ip6e_len + 2) * 4;
			else
				off += (opt6.ip6e_len + 1) * 8;
			pd.proto = opt6.ip6e_nxt;
			/* advance to the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);

	/* if there's no routing header, use unmodified mbuf for checksumming */
	if (!n)
		n = m;

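	/*
	 * Dispatch on the terminal protocol: each case pulls up its
	 * header, tries to match an existing state entry first, and
	 * only runs a full ruleset evaluation (pf_test_rule) when no
	 * state exists yet.
	 */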
	switch (pd.proto) {

	case IPPROTO_TCP: {
		struct tcphdr	th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr	uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_ICMPV6: {
		struct icmp6_hdr	ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif,
		    m, off, h, &pd, &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, NULL, inp);
		break;
	}

done:
	if (n != m) {
		m_freem(n);
		n = NULL;
	}

	/* handle dangerous IPv6 extension headers. */
	if (action == PF_PASS && rh_cnt &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with dangerous v6 headers\n"));
	}

	if ((s && s->tag) || r->rtableid)
		pf_tag_packet(m, s ? s->tag : 0, r->rtableid);

#if 0
	if (dir == PF_IN && s && s->key[PF_SK_STACK])
		m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#endif

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		if (pd.tos & IPTOS_LOWDELAY)
			m->m_pkthdr.pf.qid = r->pqid;
		else
			m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = AF_INET6;
		m->m_pkthdr.pf.hdr = h;
		if (s) {
			/* for fairq */
			m->m_pkthdr.pf.state_hash = s->hash;
			m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
		}
	}
#endif /* ALTQ */

	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
		m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (dir == PF_IN && action == PF_PASS && r->divert.port) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(m))) {
			m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->port = r->divert.port;
			divert->addr.ipv6 = r->divert.addr.v6;
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
		    &pd);
	}

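	/* update interface, rule, state and table counters, as in pf_test() */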
	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->addr[0],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->addr[1],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route6 can free the mbuf causing *m0 to become NULL */
		pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET6 */

/* stub: always report no congestion */
int
pf_check_congestion(struct ifqueue *ifq)
{
	return (0);
}