/*	$OpenBSD: pf.c,v 1.614 2008/08/02 12:34:37 henning Exp $ */

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <vm/vm_zone.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <machine/inttypes.h>

#include <sys/md5.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/netisr.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <netinet/if_ether.h>

#include <net/pf/pfvar.h>
#include <net/pf/if_pflog.h>

#include <net/pf/if_pfsync.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <sys/in_cksum.h>
#include <sys/ucred.h>
#include <machine/limits.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>

extern int ip_optcopy(struct ip *, struct ip *);
extern int debug_pfugidhack;

struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);

#define DPFPRINTF(n, x)	if (pf_status.debug >= (n)) kprintf x

/*
 * Global variables
 */

/* mask radix tree */
struct radix_node_head	*pf_maskhead;

/* state tables */
struct pf_state_tree	 pf_statetbl;

struct pf_altqqueue	 pf_altqs[2];
struct pf_palist	 pf_pabuf;
struct pf_altqqueue	*pf_altqs_active;
struct pf_altqqueue	*pf_altqs_inactive;
struct pf_status	 pf_status;

u_int32_t		 ticket_altqs_active;
u_int32_t		 ticket_altqs_inactive;
int			 altqs_inactive_open;
u_int32_t		 ticket_pabuf;

MD5_CTX			 pf_tcp_secret_ctx;
u_char			 pf_tcp_secret[16];
int			 pf_tcp_secret_init;
int			 pf_tcp_iss_off;

struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_rule		*r;
	struct pf_anchor_node	*parent;
	struct pf_anchor	*child;
} pf_anchor_stack[64];

vm_zone_t		 pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
vm_zone_t		 pf_state_pl, pf_state_key_pl, pf_state_item_pl;
vm_zone_t		 pf_altq_pl;

void			 pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

void			 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
void			 pf_add_threshold(struct pf_threshold *);
int			 pf_check_threshold(struct pf_threshold *);

void			 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
int			 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
#ifdef INET6
void			 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
#endif /* INET6 */
void			 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
void			 pf_send_tcp(const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ether_header *, struct ifnet *);
void			 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
struct pf_rule		*pf_match_translation(struct pf_pdesc *, struct mbuf *,
			    int, int, struct pfi_kif *,
			    struct pf_addr *, u_int16_t, struct pf_addr *,
			    u_int16_t, int);
struct pf_rule		*pf_get_translation(struct pf_pdesc *, struct mbuf *,
			    int, int, struct pfi_kif *, struct pf_src_node **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t);
void			 pf_detach_state(struct pf_state *);
int			 pf_state_key_setup(struct pf_pdesc *, struct pf_rule *,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t);
void			 pf_state_key_detach(struct pf_state *, int);
u_int32_t		 pf_tcp_iss(struct pf_pdesc *);
int			 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct ifqueue *,
			    struct inpcb *);
static __inline int	 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
int			 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
int			 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
int			 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
int			 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
int			 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
int			 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
int			 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
void			 pf_step_into_anchor(int *, struct pf_ruleset **, int,
			    struct pf_rule **, struct pf_rule **, int *);
int			 pf_step_out_of_anchor(int *, struct pf_ruleset **,
			    int, struct pf_rule **, struct pf_rule **,
			    int *);
void			 pf_hash(struct pf_addr *, struct pf_addr *,
			    struct pf_poolhashkey *, sa_family_t);
int			 pf_map_addr(u_int8_t, struct pf_rule *,
			    struct pf_addr *, struct pf_addr *,
			    struct pf_addr *, struct pf_src_node **);
int			 pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    struct pf_addr *, u_int16_t *, u_int16_t, u_int16_t,
			    struct pf_src_node **);
void			 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
void			 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
u_int16_t		 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
u_int16_t		 pf_calc_mss(struct pf_addr *, sa_family_t,
			    u_int16_t);
void			 pf_set_rt_ifp(struct pf_state *,
			    struct pf_addr *);
int			 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
struct pf_divert	*pf_get_divert(struct mbuf *);
void			 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
int			 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
struct pf_state		*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int, struct mbuf *);
int			 pf_src_connlimit(struct pf_state **);
int			 pf_check_congestion(struct ifqueue *);

extern int pf_end_threads;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl, PFSTATE_HIWAT },
	{ &pf_src_tree_pl, PFSNODE_HIWAT },
	{ &pf_frent_pl, PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl, PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl, PFR_KENTRY_HIWAT }
};

#define STATE_LOOKUP(i, k, d, s, m)				\
	do {							\
		s = pf_find_state(i, k, d, m);			\
		if (s == NULL || (s)->timeout == PFTM_PURGE)	\
			return (PF_DROP);			\
		if (d == PF_OUT &&				\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&	\
		    (s)->rule.ptr->direction == PF_OUT) ||	\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&		\
		    (s)->rule.ptr->direction == PF_IN)) &&	\
		    (s)->rt_kif != NULL &&			\
		    (s)->rt_kif != i)				\
			return (PF_PASS);			\
	} while (0)

#define BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
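
/*
 * Note on the two macros above: STATE_LOOKUP() drops packets whose state
 * is already queued for purging (PFTM_PURGE) and lets outbound packets
 * pass untouched when a route-to/reply-to state will re-enter pf on the
 * routing interface (rt_kif) anyway.  BOUND_IFACE() picks the interface
 * a new state is keyed to: the creating interface for PFRULE_IFBOUND
 * rules, pfi_all for floating states.
 */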

#define STATE_INC_COUNTERS(s)				\
	do {						\
		s->rule.ptr->states_cur++;		\
		s->rule.ptr->states_tot++;		\
		if (s->anchor.ptr != NULL) {		\
			s->anchor.ptr->states_cur++;	\
			s->anchor.ptr->states_tot++;	\
		}					\
		if (s->nat_rule.ptr != NULL) {		\
			s->nat_rule.ptr->states_cur++;	\
			s->nat_rule.ptr->states_tot++;	\
		}					\
	} while (0)

#define STATE_DEC_COUNTERS(s)				\
	do {						\
		if (s->nat_rule.ptr != NULL)		\
			s->nat_rule.ptr->states_cur--;	\
		if (s->anchor.ptr != NULL)		\
			s->anchor.ptr->states_cur--;	\
		s->rule.ptr->states_cur--;		\
	} while (0)

static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
static __inline int pf_state_compare_key(struct pf_state_key *,
	struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
	struct pf_state *);

struct pf_src_tree tree_src_tracking;

struct pf_state_tree_id tree_id;
struct pf_state_queue state_list;

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
RB_GENERATE(pf_state_tree_id, pf_state,
    entry_id, pf_state_compare_id);

static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
	int	diff;

	if (a->rule.ptr > b->rule.ptr)
		return (1);
	if (a->rule.ptr < b->rule.ptr)
		return (-1);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr.addr32[3] > b->addr.addr32[3])
			return (1);
		if (a->addr.addr32[3] < b->addr.addr32[3])
			return (-1);
		if (a->addr.addr32[2] > b->addr.addr32[2])
			return (1);
		if (a->addr.addr32[2] < b->addr.addr32[2])
			return (-1);
		if (a->addr.addr32[1] > b->addr.addr32[1])
			return (1);
		if (a->addr.addr32[1] < b->addr.addr32[1])
			return (-1);
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}
	return (0);
}

u_int32_t
pf_state_hash(struct pf_state_key *sk)
{
	u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15));

	if (hv == 0)	/* disallow 0 */
		hv = 1;
	return (hv);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */

void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_second;
}
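
/*
 * The threshold counter is fixed-point: each event adds PF_THRESHOLD_MULT
 * and the accumulated count decays linearly to zero over 'seconds'.
 * Illustrative numbers (not from this file): with max-src-conn-rate 10/5,
 * limit is 10 * PF_THRESHOLD_MULT; after a burst and a 2.5 second pause,
 * count is scaled by (1 - 2.5/5), i.e. half the weight remains, so
 * pf_check_threshold() only trips while more than 10 connections worth
 * of weight sits inside the sliding 5 second window.
 */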
void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_second, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}

int
pf_src_connlimit(struct pf_state **state)
{
	int bad = 0;

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	if ((*state)->rule.ptr->overload_tbl) {
		struct pfr_addr	 p;
		u_int32_t	 killed = 0;

		pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf_src_connlimit: blocking address ");
			pf_print_host(&(*state)->src_node->addr, 0,
			    (*state)->key[PF_SK_WIRE]->af);
		}

		bzero(&p, sizeof(p));
		p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
		switch ((*state)->key[PF_SK_WIRE]->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = (*state)->src_node->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = (*state)->src_node->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
		    &p, time_second);

		/* kill existing states if that's required. */
		if ((*state)->rule.ptr->flush) {
			struct pf_state_key *sk;
			struct pf_state *st;

			pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
			RB_FOREACH(st, pf_state_tree_id, &tree_id) {
				sk = st->key[PF_SK_WIRE];
				/*
				 * Kill states from this source.  (Only those
(Only those 503 * from the same rule if PF_FLUSH_GLOBAL is not 504 * set) 505 */ 506 if (sk->af == 507 (*state)->key[PF_SK_WIRE]->af && 508 (((*state)->direction == PF_OUT && 509 PF_AEQ(&(*state)->src_node->addr, 510 &sk->addr[0], sk->af)) || 511 ((*state)->direction == PF_IN && 512 PF_AEQ(&(*state)->src_node->addr, 513 &sk->addr[1], sk->af))) && 514 ((*state)->rule.ptr->flush & 515 PF_FLUSH_GLOBAL || 516 (*state)->rule.ptr == st->rule.ptr)) { 517 st->timeout = PFTM_PURGE; 518 st->src.state = st->dst.state = 519 TCPS_CLOSED; 520 killed++; 521 } 522 } 523 if (pf_status.debug >= PF_DEBUG_MISC) 524 kprintf(", %u states killed", killed); 525 } 526 if (pf_status.debug >= PF_DEBUG_MISC) 527 kprintf("\n"); 528 } 529 530 /* kill this state */ 531 (*state)->timeout = PFTM_PURGE; 532 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 533 return (1); 534 } 535 536 int 537 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, 538 struct pf_addr *src, sa_family_t af) 539 { 540 struct pf_src_node k; 541 542 if (*sn == NULL) { 543 k.af = af; 544 PF_ACPY(&k.addr, src, af); 545 if (rule->rule_flag & PFRULE_RULESRCTRACK || 546 rule->rpool.opts & PF_POOL_STICKYADDR) 547 k.rule.ptr = rule; 548 else 549 k.rule.ptr = NULL; 550 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; 551 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k); 552 } 553 if (*sn == NULL) { 554 if (!rule->max_src_nodes || 555 rule->src_nodes < rule->max_src_nodes) 556 (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO); 557 else 558 pf_status.lcounters[LCNT_SRCNODES]++; 559 if ((*sn) == NULL) 560 return (-1); 561 562 pf_init_threshold(&(*sn)->conn_rate, 563 rule->max_src_conn_rate.limit, 564 rule->max_src_conn_rate.seconds); 565 566 (*sn)->af = af; 567 if (rule->rule_flag & PFRULE_RULESRCTRACK || 568 rule->rpool.opts & PF_POOL_STICKYADDR) 569 (*sn)->rule.ptr = rule; 570 else 571 (*sn)->rule.ptr = NULL; 572 PF_ACPY(&(*sn)->addr, src, af); 573 if (RB_INSERT(pf_src_tree, 574 &tree_src_tracking, *sn) != NULL) { 575 if (pf_status.debug >= PF_DEBUG_MISC) { 576 kprintf("pf: src_tree insert failed: "); 577 pf_print_host(&(*sn)->addr, 0, af); 578 kprintf("\n"); 579 } 580 pool_put(&pf_src_tree_pl, *sn); 581 return (-1); 582 } 583 (*sn)->creation = time_second; 584 (*sn)->ruletype = rule->action; 585 if ((*sn)->rule.ptr != NULL) 586 (*sn)->rule.ptr->src_nodes++; 587 pf_status.scounters[SCNT_SRC_NODE_INSERT]++; 588 pf_status.src_nodes++; 589 } else { 590 if (rule->max_src_states && 591 (*sn)->states >= rule->max_src_states) { 592 pf_status.lcounters[LCNT_SRCSTATES]++; 593 return (-1); 594 } 595 } 596 return (0); 597 } 598 599 /* state table stuff */ 600 601 static __inline int 602 pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b) 603 { 604 int diff; 605 606 if ((diff = a->proto - b->proto) != 0) 607 return (diff); 608 if ((diff = a->af - b->af) != 0) 609 return (diff); 610 switch (a->af) { 611 #ifdef INET 612 case AF_INET: 613 if (a->addr[0].addr32[0] > b->addr[0].addr32[0]) 614 return (1); 615 if (a->addr[0].addr32[0] < b->addr[0].addr32[0]) 616 return (-1); 617 if (a->addr[1].addr32[0] > b->addr[1].addr32[0]) 618 return (1); 619 if (a->addr[1].addr32[0] < b->addr[1].addr32[0]) 620 return (-1); 621 break; 622 #endif /* INET */ 623 #ifdef INET6 624 case AF_INET6: 625 if (a->addr[0].addr32[3] > b->addr[0].addr32[3]) 626 return (1); 627 if (a->addr[0].addr32[3] < b->addr[0].addr32[3]) 628 return (-1); 629 if (a->addr[1].addr32[3] > b->addr[1].addr32[3]) 630 return (1); 631 if (a->addr[1].addr32[3] < b->addr[1].addr32[3]) 
static __inline int
pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
{
	int	diff;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
			return (1);
		if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
			return (-1);
		if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
			return (1);
		if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
			return (-1);
		if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
			return (1);
		if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
			return (-1);
		if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
			return (1);
		if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
			return (-1);
		if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
			return (1);
		if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
			return (-1);
		if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
			return (1);
		if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
			return (-1);
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}

	if ((diff = a->port[0] - b->port[0]) != 0)
		return (diff);
	if ((diff = a->port[1] - b->port[1]) != 0)
		return (diff);

	return (0);
}

static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
	if (a->id > b->id)
		return (1);
	if (a->id < b->id)
		return (-1);
	if (a->creatorid > b->creatorid)
		return (1);
	if (a->creatorid < b->creatorid)
		return (-1);

	return (0);
}
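
/*
 * Each state carries up to two keys: key[PF_SK_WIRE] holds the addresses
 * as they appear on the wire, key[PF_SK_STACK] as the local stack sees
 * them; the two only differ when NAT rewrote the packet.  The attach
 * below fails with -1 on a collision, i.e. when another state on the
 * same kif and direction already owns an identical key.
 */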
int
pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
{
	struct pf_state_item	*si;
	struct pf_state_key	*cur;

	KKASSERT(s->key[idx] == NULL);	/* XXX handle this? */

	if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
		/* key exists. check for same kif, if none, add to key */
		TAILQ_FOREACH(si, &cur->states, entry)
			if (si->s->kif == s->kif &&
			    si->s->direction == s->direction) {
				if (pf_status.debug >= PF_DEBUG_MISC) {
					kprintf(
					    "pf: %s key attach failed on %s: ",
					    (idx == PF_SK_WIRE) ?
					    "wire" : "stack",
					    s->kif->pfik_name);
					pf_print_state_parts(s,
					    (idx == PF_SK_WIRE) ? sk : NULL,
					    (idx == PF_SK_STACK) ? sk : NULL);
					kprintf("\n");
				}
				pool_put(&pf_state_key_pl, sk);
				return (-1);	/* collision! */
			}
		pool_put(&pf_state_key_pl, sk);
		s->key[idx] = cur;
	} else
		s->key[idx] = sk;

	if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
		pf_state_key_detach(s, idx);
		return (-1);
	}
	si->s = s;

	/* list is sorted, if-bound states before floating */
	if (s->kif == pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
	return (0);
}

void
pf_detach_state(struct pf_state *s)
{
	if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
		s->key[PF_SK_WIRE] = NULL;

	if (s->key[PF_SK_STACK] != NULL)
		pf_state_key_detach(s, PF_SK_STACK);

	if (s->key[PF_SK_WIRE] != NULL)
		pf_state_key_detach(s, PF_SK_WIRE);
}

void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_item	*si;

	si = TAILQ_FIRST(&s->key[idx]->states);
	while (si && si->s != s)
		si = TAILQ_NEXT(si, entry);

	if (si) {
		TAILQ_REMOVE(&s->key[idx]->states, si, entry);
		pool_put(&pf_state_item_pl, si);
	}

	if (TAILQ_EMPTY(&s->key[idx]->states)) {
		RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]);
		if (s->key[idx]->reverse)
			s->key[idx]->reverse->reverse = NULL;
		if (s->key[idx]->inp)
			s->key[idx]->inp->inp_pf_sk = NULL;
		pool_put(&pf_state_key_pl, s->key[idx]);
	}
	s->key[idx] = NULL;
}

struct pf_state_key *
pf_alloc_state_key(int pool_flags)
{
	struct pf_state_key	*sk;

	if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
		return (NULL);
	TAILQ_INIT(&sk->states);

	return (sk);
}

int
pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
	struct pf_state_key **skw, struct pf_state_key **sks,
	struct pf_state_key **skp, struct pf_state_key **nkp,
	struct pf_addr *saddr, struct pf_addr *daddr,
	u_int16_t sport, u_int16_t dport)
{
	KKASSERT((*skp == NULL && *nkp == NULL));

	if ((*skp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
		return (ENOMEM);

	PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
	(*skp)->port[pd->sidx] = sport;
	(*skp)->port[pd->didx] = dport;
	(*skp)->proto = pd->proto;
	(*skp)->af = pd->af;

	if (nr != NULL) {
		if ((*nkp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
			return (ENOMEM); /* caller must handle cleanup */

		/* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
		PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
		PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
		(*nkp)->port[0] = (*skp)->port[0];
		(*nkp)->port[1] = (*skp)->port[1];
		(*nkp)->proto = pd->proto;
		(*nkp)->af = pd->af;
	} else
		*nkp = *skp;

	if (pd->dir == PF_IN) {
		*skw = *skp;
		*sks = *nkp;
	} else {
		*sks = *skp;
		*skw = *nkp;
	}
	return (0);
}
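
/*
 * pf_state_insert() links a fully constructed state into the lookup
 * structures: both state keys, the id tree (id/creatorid are assigned
 * here when the state was created locally rather than learned via
 * pfsync) and the global state_list that the purge scan walks.
 */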
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	s->kif = kif;

	if (skw == sks) {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE))
			return (-1);
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
	} else {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
			pool_put(&pf_state_key_pl, sks);
			return (-1);
		}
		if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
			pf_state_key_detach(s, PF_SK_WIRE);
			return (-1);
		}
	}

	if (s->id == 0 && s->creatorid == 0) {
		s->id = htobe64(pf_status.stateid++);
		s->creatorid = pf_status.hostid;
	}

	/*
	 * Calculate hash code for altq
	 */
	s->hash = crc32(s->key[PF_SK_WIRE], sizeof(*sks));

	if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf: state insert failed: "
			    "id: %016jx creatorid: %08x",
			    (uintmax_t)be64toh(s->id), ntohl(s->creatorid));
			if (s->sync_flags & PFSTATE_FROMSYNC)
				kprintf(" (from sync)");
			kprintf("\n");
		}
		pf_detach_state(s);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&state_list, s, entry_list);
	pf_status.fcounters[FCNT_STATE_INSERT]++;
	pf_status.states++;
	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
	pfsync_insert_state(s);
	return (0);
}

struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
}
struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
    struct mbuf *m)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
	    ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse)
		sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
	else {
		if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
		    (struct pf_state_key *)key)) == NULL)
			return (NULL);
		if (dir == PF_OUT && m->m_pkthdr.pf.statekey) {
			((struct pf_state_key *)
			    m->m_pkthdr.pf.statekey)->reverse = sk;
			sk->reverse = m->m_pkthdr.pf.statekey;
		}
	}

	if (dir == PF_OUT)
		m->m_pkthdr.pf.statekey = NULL;

	/* list is sorted, if-bound states before floating ones */
	TAILQ_FOREACH(si, &sk->states, entry)
		if ((si->s->kif == pfi_all || si->s->kif == kif) &&
		    sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
		    si->s->key[PF_SK_STACK]))
			return (si->s);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si, *ret = NULL;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);

	if (sk != NULL) {
		TAILQ_FOREACH(si, &sk->states, entry)
			if (dir == PF_INOUT ||
			    (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
			    si->s->key[PF_SK_STACK]))) {
				if (more == NULL)
					return (si->s);

				if (ret)
					(*more)++;
				else
					ret = si;
			}
	}
	return (ret ? ret->s : NULL);
}

/* END state table stuff */

void
pf_purge_thread(void *v)
{
	int nloops = 0;
	int locked = 0;

	lwkt_gettoken(&pf_token);
	for (;;) {
		tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);

		lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);

		if (pf_end_threads) {
			pf_purge_expired_states(pf_status.states, 1);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes(1);
			pf_end_threads++;

			lockmgr(&pf_consistency_lock, LK_RELEASE);
			wakeup(pf_purge_thread);
			kthread_exit();
		}
		crit_enter();

		/* process a fraction of the state table every second */
		if (!pf_purge_expired_states(1 + (pf_status.states /
		    pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
			pf_purge_expired_states(1 + (pf_status.states /
			    pf_default_rule.timeout[PFTM_INTERVAL]), 1);
		}

		/* purge other expired types every PFTM_INTERVAL seconds */
		if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
			pf_purge_expired_fragments();
			if (!pf_purge_expired_src_nodes(locked)) {
				pf_purge_expired_src_nodes(1);
			}
			nloops = 0;
		}
		crit_exit();
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	}
	lwkt_reltoken(&pf_token);
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_second);
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	KKASSERT(state->timeout != PFTM_UNLINKED);
	KKASSERT(state->timeout < PFTM_MAX);
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states_cur;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_second);
	}
	return (state->expire + timeout);
}

int
pf_purge_expired_src_nodes(int waslocked)
{
	struct pf_src_node	*cur, *next;
	int			 locked = waslocked;

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

		if (cur->states <= 0 && cur->expire <= time_second) {
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				next = RB_NEXT(pf_src_tree,
				    &tree_src_tracking, cur);
				locked = 1;
			}
			if (cur->rule.ptr != NULL) {
				cur->rule.ptr->src_nodes--;
				if (cur->rule.ptr->states_cur <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0)
					pf_rm_rule(NULL, cur->rule.ptr);
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
			pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			pf_status.src_nodes--;
			pool_put(&pf_src_tree_pl, cur);
		}
	}

	if (locked && !waslocked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}

void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			--s->src_node->conn;
		if (--s->src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_second + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_second + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/* callers should be at crit_enter() */
void
pf_unlink_state(struct pf_state *cur)
{
	if (cur->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
		    &cur->key[PF_SK_WIRE]->addr[1],
		    &cur->key[PF_SK_WIRE]->addr[0],
		    cur->key[PF_SK_WIRE]->port[1],
		    cur->key[PF_SK_WIRE]->port[0],
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}
	RB_REMOVE(pf_state_tree_id, &tree_id, cur);
	if (cur->creatorid == pf_status.hostid)
		pfsync_delete_state(cur);
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur);
}

static struct pf_state	*purge_cur;

/*
 * callers should be at crit_enter() and hold the
 * write_lock on pf_consistency_lock
 */
void
pf_free_state(struct pf_state *cur)
{
	if (pfsyncif != NULL &&
	    (pfsyncif->sc_bulk_send_next == cur ||
	    pfsyncif->sc_bulk_terminator == cur))
		return;
	KKASSERT(cur->timeout == PFTM_UNLINKED);
	if (--cur->rule.ptr->states_cur <= 0 &&
	    cur->rule.ptr->src_nodes <= 0)
		pf_rm_rule(NULL, cur->rule.ptr);
	if (cur->nat_rule.ptr != NULL)
		if (--cur->nat_rule.ptr->states_cur <= 0 &&
		    cur->nat_rule.ptr->src_nodes <= 0)
			pf_rm_rule(NULL, cur->nat_rule.ptr);
	if (cur->anchor.ptr != NULL)
		if (--cur->anchor.ptr->states_cur <= 0)
			pf_rm_rule(NULL, cur->anchor.ptr);
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);

	/*
	 * We may be freeing pf_purge_expired_states()'s saved scan entry,
	 * adjust it if necessary.
	 */
	if (purge_cur == cur) {
		kprintf("PURGE CONFLICT\n");
		purge_cur = TAILQ_NEXT(purge_cur, entry_list);
	}
	TAILQ_REMOVE(&state_list, cur, entry_list);
	if (cur->tag)
		pf_tag_unref(cur->tag);
	pool_put(&pf_state_pl, cur);
	pf_status.fcounters[FCNT_STATE_REMOVALS]++;
	pf_status.states--;
}

int
pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
{
	struct pf_state		*cur;
	int			 locked = waslocked;

	while (maxcheck--) {
		/*
		 * Wrap to start of list when we hit the end
		 */
		cur = purge_cur;
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list);
			if (cur == NULL)
				break;	/* list empty */
		}

		/*
		 * Setup next (purge_cur) while we process this one.  If
		 * we block and something else deletes purge_cur,
		 * pf_free_state() will adjust it further ahead.
		 */
		purge_cur = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			/* free unlinked state */
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				locked = 1;
			}
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= time_second) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			if (!locked) {
				if (!lockmgr(&pf_consistency_lock,
				    LK_EXCLUSIVE))
					return (0);
				locked = 1;
			}
			pf_free_state(cur);
		}
	}

	if (locked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}

int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE)
		return (0);
	if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
		return (1);
	return (0);
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
		return;
	pfr_detach_table(aw->p.tbl);
	aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt = aw->p.tbl;

	if (aw->type != PF_ADDR_TABLE || kt == NULL)
		return;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
		kt->pfrkt_cnt : -1;
}

void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			kprintf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart = 255, curend = 0,
		    maxstart = 0, maxend = 0;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				else
					curend = i;
			} else {
				if (curstart) {
					if ((curend - curstart) >
					    (maxend - maxstart)) {
						maxstart = curstart;
						maxend = curend;
						curstart = 255;
					}
				}
			}
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (maxend != 7) {
					if (i == maxstart)
						kprintf(":");
				} else {
					if (i == maxend)
						kprintf(":");
				}
			} else {
				b = ntohs(addr->addr16[i]);
				kprintf("%x", b);
				if (i < 7)
					kprintf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			kprintf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}

void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_TCP:
		kprintf("TCP ");
		break;
	case IPPROTO_UDP:
		kprintf("UDP ");
		break;
	case IPPROTO_ICMP:
		kprintf("ICMP ");
		break;
	case IPPROTO_ICMPV6:
		kprintf("ICMPV6 ");
		break;
	default:
		kprintf("%u ", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		kprintf(" in");
		break;
	case PF_OUT:
		kprintf(" out");
		break;
	}
	if (skw) {
		kprintf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		kprintf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		kprintf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			kprintf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			kprintf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			kprintf("]");
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			kprintf("]");
		}
		kprintf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		kprintf(" ");
	if (f & TH_FIN)
		kprintf("F");
	if (f & TH_SYN)
		kprintf("S");
	if (f & TH_RST)
		kprintf("R");
	if (f & TH_PUSH)
		kprintf("P");
	if (f & TH_ACK)
		kprintf("A");
	if (f & TH_URG)
		kprintf("U");
	if (f & TH_ECE)
		kprintf("E");
	if (f & TH_CWR)
		kprintf("W");
}
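
/*
 * Skip steps let the rule evaluator jump over runs of rules that cannot
 * match once one of them failed a cheap criterion (interface, direction,
 * af, proto, src/dst address and port).  For each criterion i, skip[i]
 * in a rule points at the first later rule whose value differs.  Toy
 * example with a hypothetical interface: if rules 1-5 all say "on fxp0"
 * and rule 6 is the first on a different interface, rules 1-5 get
 * skip[PF_SKIP_IFP] = rule 6, so a packet not on fxp0 tests that
 * criterion once instead of five times.
 */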
#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	case PF_ADDR_RTLABEL:
		return (aw1->v.rtlabel != aw2->v.rtlabel);
	default:
		kprintf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
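
/*
 * pf_cksum_fixup() above is the standard incremental internet checksum
 * update (in the spirit of RFC 1624): for each rewritten 16-bit word it
 * adds the old value, subtracts the new one and folds the carry back in,
 * so the header never has to be re-summed in full.  Worked example with
 * made-up values: cksum 0x1234, old 0x0a00, new 0x0b00 gives
 * 0x1234 + 0x0a00 - 0x0b00 = 0x1134, still matching the packet.  The
 * 'udp' flag preserves the UDP convention that an all-zero checksum
 * means "no checksum" (0xffff is transmitted instead of 0).
 */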
void
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*p = pn;
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u),
		    po, pn, u);
		break;
#endif /* INET6 */
	}
}

/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

#ifdef INET6
void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
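
/*
 * pf_change_icmp() below rewrites an address/port inside the packet that
 * an ICMP error quotes.  Several checksums can be affected at once: the
 * quoted protocol header's, the quoted IP header's, the ICMP header's
 * and possibly the outer IP header's; each is patched with the same
 * incremental fixup so the error still matches the translated flow.
 */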
void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc = 0;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}

/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct raw_sackblock sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return (0);

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(&sack.rblk_start,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.rblk_end,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, opts);
	return (copyback);
}
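
/*
 * pf_send_tcp() hand-builds a minimal IPv4 or IPv6 TCP segment in a
 * fresh mbuf (optionally carrying an MSS option) and pushes it out via
 * ip_output()/ip6_output().  It produces the RSTs and ACKs used by
 * return-rst rules and the TCP SYN proxy, e.g. the RST+ACK sent from
 * pf_unlink_state() above for PF_TCPS_PROXY_DST states.
 */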
void
pf_send_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
{
	struct mbuf	*m;
	int		 len = 0, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th = NULL;
	char		*opt;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	}

	/*
	 * Create outgoing mbuf.
	 *
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	m = m_gethdr(MB_DONTWAIT, MT_HEADER);
	if (m == NULL) {
		return;
	}
	if (tag)
		m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m->m_pkthdr.pf.flags = 0;
	m->m_pkthdr.pf.tag = rtag;
	/* XXX Recheck when upgrading to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;
	if (r != NULL && r->rtableid >= 0)
		m->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r != NULL && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = af;
		m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = tlen;
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		mss = htons(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_len = len;
		h->ip_off = path_mtu_discovery ? IP_DF : 0;
		h->ip_ttl = ttl ? ttl : ip_defttl;
		h->ip_sum = 0;
		if (eh == NULL) {
			lwkt_reltoken(&pf_token);
			ip_output(m, NULL, NULL, 0, NULL, NULL);
			lwkt_gettoken(&pf_token);
		} else {
			struct route		 ro;
			struct rtentry		 rt;
			struct ether_header	*e = (void *)ro.ro_dst.sa_data;

			if (ifp == NULL) {
				m_freem(m);
				return;
			}
			rt.rt_ifp = ifp;
			ro.ro_rt = &rt;
			ro.ro_dst.sa_len = sizeof(ro.ro_dst);
			ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
			bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
			bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
			e->ether_type = eh->ether_type;
			/* XXX_IMPORT: later */
			lwkt_reltoken(&pf_token);
			ip_output(m, (void *)NULL, &ro, 0,
			    (void *)NULL, (void *)NULL);
			lwkt_gettoken(&pf_token);
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		lwkt_reltoken(&pf_token);
		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
		lwkt_gettoken(&pf_token);
		break;
#endif /* INET6 */
	}
}

void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct mbuf	*m0;

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
		return;

	m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m0->m_pkthdr.pf.flags = 0;
	/* XXX Recheck when upgrading to > 4.4 */
	m0->m_pkthdr.pf.statekey = NULL;

	if (r->rtableid >= 0)
		m0->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r->qid) {
		/* tag the copy, not the original packet */
		m0->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m0->m_pkthdr.pf.qid = r->qid;
		m0->m_pkthdr.pf.ecn_af = af;
		m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
#endif /* INET6 */
	}
}

/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		     (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		     (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		     (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		     (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}

/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))
			return (0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		int	i;

		/* check a >= b */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
				break;
			else if (a->addr32[i] < b->addr32[i])
				return (0);
		/* check a <= e */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
				break;
			else if (a->addr32[i] > e->addr32[i])
				return (0);
		break;
	}
#endif /* INET6 */
	}
	return (1);
}

int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return ((p > a1) && (p < a2));
	case PF_OP_XRG:
		return ((p < a1) || (p > a2));
	case PF_OP_RRG:
		return ((p >= a1) && (p <= a2));
	case PF_OP_EQ:
		return (p == a1);
	case PF_OP_NE:
		return (p != a1);
	case PF_OP_LT:
		return (p < a1);
	case PF_OP_LE:
		return (p <= a1);
	case PF_OP_GT:
		return (p > a1);
	case PF_OP_GE:
		return (p >= a1);
	}
	return (0); /* never reached */
}

int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
	a1 = ntohs(a1);
	a2 = ntohs(a2);
	p = ntohs(p);
	return (pf_match(op, a1, a2, p));
}

int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, u));
}

int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, g));
}

int
pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
{
	if (*tag == -1)
		*tag = m->m_pkthdr.pf.tag;

	return ((!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag));
}

int
pf_tag_packet(struct mbuf *m, int tag, int rtableid)
{
	if (tag <= 0 && rtableid < 0)
		return (0);

	if (tag > 0)
		m->m_pkthdr.pf.tag = tag;
	if (rtableid >= 0)
		m->m_pkthdr.pf.rtableid = rtableid;

	return (0);
}

void
pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe	*f;

	(*r)->anchor->match = 0;
	if (match)
		*match = 0;
	if (*depth >= sizeof(pf_anchor_stack) /
	    sizeof(pf_anchor_stack[0])) {
		kprintf("pf_step_into_anchor: stack overflow\n");
		*r = TAILQ_NEXT(*r, entries);
		return;
	} else if (*depth == 0 && a != NULL)
		*a = *r;
	f = pf_anchor_stack + (*depth)++;
	f->rs = *rs;
	f->r = *r;
	if ((*r)->anchor_wildcard) {
		f->parent = &(*r)->anchor->children;
		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
		    NULL) {
			*r = NULL;
			return;
		}
		*rs = &f->child->ruleset;
	} else {
		f->parent = NULL;
		f->child = NULL;
		*rs = &(*r)->anchor->ruleset;
	}
	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
}
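
/*
 * Counterpart to pf_step_into_anchor(): unwind pf_anchor_stack one frame
 * at a time, visiting any remaining wildcard children first.  Returns
 * the 'quick' setting of the anchor rule when the anchor matched, so
 * the caller can stop evaluation just as for an ordinary quick rule.
 */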
struct pf_rule **a, int *match)
2139 {
2140 struct pf_anchor_stackframe *f;
2141 int quick = 0;
2142
2143 do {
2144 if (*depth <= 0)
2145 break;
2146 f = pf_anchor_stack + *depth - 1;
2147 if (f->parent != NULL && f->child != NULL) {
2148 if (f->child->match ||
2149 (match != NULL && *match)) {
2150 f->r->anchor->match = 1;
2151 if (match != NULL) *match = 0;
2152 }
2153 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
2154 if (f->child != NULL) {
2155 *rs = &f->child->ruleset;
2156 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2157 if (*r == NULL)
2158 continue;
2159 else
2160 break;
2161 }
2162 }
2163 (*depth)--;
2164 if (*depth == 0 && a != NULL)
2165 *a = NULL;
2166 *rs = f->rs;
2167 if (f->r->anchor->match || (match != NULL && *match))
2168 quick = f->r->quick;
2169 *r = TAILQ_NEXT(f->r, entries);
2170 } while (*r == NULL);
2171
2172 return (quick);
2173 }
2174
2175 #ifdef INET6
2176 void
2177 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2178 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2179 {
2180 switch (af) {
2181 #ifdef INET
2182 case AF_INET:
2183 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2184 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2185 break;
2186 #endif /* INET */
2187 case AF_INET6:
2188 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2189 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2190 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2191 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2192 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2193 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2194 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2195 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2196 break;
2197 }
2198 }
2199
2200 void
2201 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2202 {
2203 switch (af) {
2204 #ifdef INET
2205 case AF_INET:
2206 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2207 break;
2208 #endif /* INET */
2209 case AF_INET6:
2210 if (addr->addr32[3] == 0xffffffff) {
2211 addr->addr32[3] = 0;
2212 if (addr->addr32[2] == 0xffffffff) {
2213 addr->addr32[2] = 0;
2214 if (addr->addr32[1] == 0xffffffff) {
2215 addr->addr32[1] = 0;
2216 addr->addr32[0] =
2217 htonl(ntohl(addr->addr32[0]) + 1);
2218 } else
2219 addr->addr32[1] =
2220 htonl(ntohl(addr->addr32[1]) + 1);
2221 } else
2222 addr->addr32[2] =
2223 htonl(ntohl(addr->addr32[2]) + 1);
2224 } else
2225 addr->addr32[3] =
2226 htonl(ntohl(addr->addr32[3]) + 1);
2227 break;
2228 }
2229 }
2230 #endif /* INET6 */
2231
2232 #define mix(a,b,c) \
2233 do { \
2234 a -= b; a -= c; a ^= (c >> 13); \
2235 b -= c; b -= a; b ^= (a << 8); \
2236 c -= a; c -= b; c ^= (b >> 13); \
2237 a -= b; a -= c; a ^= (c >> 12); \
2238 b -= c; b -= a; b ^= (a << 16); \
2239 c -= a; c -= b; c ^= (b >> 5); \
2240 a -= b; a -= c; a ^= (c >> 3); \
2241 b -= c; b -= a; b ^= (a << 10); \
2242 c -= a; c -= b; c ^= (b >> 15); \
2243 } while (0)
2244
2245 /*
2246 * hash function based on bridge_hash in if_bridge.c
2247 */
2248 void
2249 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
2250 struct pf_poolhashkey *key, sa_family_t af)
2251 {
2252 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
2253
2254 switch (af) {
2255 #ifdef INET
2256 case AF_INET:
2257 a += inaddr->addr32[0];
2258 b += key->key32[1];
2259 mix(a, b, c);
2260 hash->addr32[0] = c + key->key32[2];
2261 break;
2262 #endif /* INET */
2263 #ifdef INET6
2264 case AF_INET6:
2265 a += inaddr->addr32[0];
2266 b +=
inaddr->addr32[2]; 2267 mix(a, b, c); 2268 hash->addr32[0] = c; 2269 a += inaddr->addr32[1]; 2270 b += inaddr->addr32[3]; 2271 c += key->key32[1]; 2272 mix(a, b, c); 2273 hash->addr32[1] = c; 2274 a += inaddr->addr32[2]; 2275 b += inaddr->addr32[1]; 2276 c += key->key32[2]; 2277 mix(a, b, c); 2278 hash->addr32[2] = c; 2279 a += inaddr->addr32[3]; 2280 b += inaddr->addr32[0]; 2281 c += key->key32[3]; 2282 mix(a, b, c); 2283 hash->addr32[3] = c; 2284 break; 2285 #endif /* INET6 */ 2286 } 2287 } 2288 2289 int 2290 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, 2291 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn) 2292 { 2293 unsigned char hash[16]; 2294 struct pf_pool *rpool = &r->rpool; 2295 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr; 2296 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask; 2297 struct pf_pooladdr *acur = rpool->cur; 2298 struct pf_src_node k; 2299 2300 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR && 2301 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2302 k.af = af; 2303 PF_ACPY(&k.addr, saddr, af); 2304 if (r->rule_flag & PFRULE_RULESRCTRACK || 2305 r->rpool.opts & PF_POOL_STICKYADDR) 2306 k.rule.ptr = r; 2307 else 2308 k.rule.ptr = NULL; 2309 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; 2310 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k); 2311 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) { 2312 PF_ACPY(naddr, &(*sn)->raddr, af); 2313 if (pf_status.debug >= PF_DEBUG_MISC) { 2314 kprintf("pf_map_addr: src tracking maps "); 2315 pf_print_host(&k.addr, 0, af); 2316 kprintf(" to "); 2317 pf_print_host(naddr, 0, af); 2318 kprintf("\n"); 2319 } 2320 return (0); 2321 } 2322 } 2323 2324 if (rpool->cur->addr.type == PF_ADDR_NOROUTE) 2325 return (1); 2326 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2327 switch (af) { 2328 #ifdef INET 2329 case AF_INET: 2330 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 && 2331 (rpool->opts & PF_POOL_TYPEMASK) != 2332 PF_POOL_ROUNDROBIN) 2333 return (1); 2334 raddr = &rpool->cur->addr.p.dyn->pfid_addr4; 2335 rmask = &rpool->cur->addr.p.dyn->pfid_mask4; 2336 break; 2337 #endif /* INET */ 2338 #ifdef INET6 2339 case AF_INET6: 2340 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 && 2341 (rpool->opts & PF_POOL_TYPEMASK) != 2342 PF_POOL_ROUNDROBIN) 2343 return (1); 2344 raddr = &rpool->cur->addr.p.dyn->pfid_addr6; 2345 rmask = &rpool->cur->addr.p.dyn->pfid_mask6; 2346 break; 2347 #endif /* INET6 */ 2348 } 2349 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2350 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) 2351 return (1); /* unsupported */ 2352 } else { 2353 raddr = &rpool->cur->addr.v.a.addr; 2354 rmask = &rpool->cur->addr.v.a.mask; 2355 } 2356 2357 switch (rpool->opts & PF_POOL_TYPEMASK) { 2358 case PF_POOL_NONE: 2359 PF_ACPY(naddr, raddr, af); 2360 break; 2361 case PF_POOL_BITMASK: 2362 PF_POOLMASK(naddr, raddr, rmask, saddr, af); 2363 break; 2364 case PF_POOL_RANDOM: 2365 if (init_addr != NULL && PF_AZERO(init_addr, af)) { 2366 switch (af) { 2367 #ifdef INET 2368 case AF_INET: 2369 rpool->counter.addr32[0] = htonl(karc4random()); 2370 break; 2371 #endif /* INET */ 2372 #ifdef INET6 2373 case AF_INET6: 2374 if (rmask->addr32[3] != 0xffffffff) 2375 rpool->counter.addr32[3] = 2376 htonl(karc4random()); 2377 else 2378 break; 2379 if (rmask->addr32[2] != 0xffffffff) 2380 rpool->counter.addr32[2] = 2381 htonl(karc4random()); 2382 else 2383 break; 2384 if (rmask->addr32[1] != 0xffffffff) 2385 rpool->counter.addr32[1] = 2386 htonl(karc4random()); 2387 else 2388 
break;
2389 if (rmask->addr32[0] != 0xffffffff)
2390 rpool->counter.addr32[0] =
2391 htonl(karc4random());
2392 break;
2393 #endif /* INET6 */
2394 }
2395 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
2396 PF_ACPY(init_addr, naddr, af);
2397
2398 } else {
2399 PF_AINC(&rpool->counter, af);
2400 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
2401 }
2402 break;
2403 case PF_POOL_SRCHASH:
2404 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
2405 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
2406 break;
2407 case PF_POOL_ROUNDROBIN:
2408 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
2409 if (!pfr_pool_get(rpool->cur->addr.p.tbl,
2410 &rpool->tblidx, &rpool->counter,
2411 &raddr, &rmask, af))
2412 goto get_addr;
2413 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
2414 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
2415 &rpool->tblidx, &rpool->counter,
2416 &raddr, &rmask, af))
2417 goto get_addr;
2418 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
2419 goto get_addr;
2420
2421 try_next:
2422 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
2423 rpool->cur = TAILQ_FIRST(&rpool->list);
2424 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
2425 rpool->tblidx = -1;
2426 if (pfr_pool_get(rpool->cur->addr.p.tbl,
2427 &rpool->tblidx, &rpool->counter,
2428 &raddr, &rmask, af)) {
2429 /* table contains no address of type 'af' */
2430 if (rpool->cur != acur)
2431 goto try_next;
2432 return (1);
2433 }
2434 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
2435 rpool->tblidx = -1;
2436 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
2437 &rpool->tblidx, &rpool->counter,
2438 &raddr, &rmask, af)) {
2439 /* table contains no address of type 'af' */
2440 if (rpool->cur != acur)
2441 goto try_next;
2442 return (1);
2443 }
2444 } else {
2445 raddr = &rpool->cur->addr.v.a.addr;
2446 rmask = &rpool->cur->addr.v.a.mask;
2447 PF_ACPY(&rpool->counter, raddr, af);
2448 }
2449
2450 get_addr:
2451 PF_ACPY(naddr, &rpool->counter, af);
2452 if (init_addr != NULL && PF_AZERO(init_addr, af))
2453 PF_ACPY(init_addr, naddr, af);
2454 PF_AINC(&rpool->counter, af);
2455 break;
2456 }
2457 if (*sn != NULL)
2458 PF_ACPY(&(*sn)->raddr, naddr, af);
2459
2460 if (pf_status.debug >= PF_DEBUG_MISC &&
2461 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
2462 kprintf("pf_map_addr: selected address ");
2463 pf_print_host(naddr, 0, af);
2464 kprintf("\n");
2465 }
2466
2467 return (0);
2468 }
2469
2470 int
2471 pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
2472 struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
2473 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
2474 struct pf_src_node **sn)
2475 {
2476 struct pf_state_key_cmp key;
2477 struct pf_addr init_addr;
2478 u_int16_t cut;
2479
2480 bzero(&init_addr, sizeof(init_addr));
2481 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
2482 return (1);
2483
2484 if (proto == IPPROTO_ICMP) {
2485 low = 1;
2486 high = 65535;
2487 }
2488
2489 do {
2490 key.af = af;
2491 key.proto = proto;
2492 PF_ACPY(&key.addr[1], daddr, key.af);
2493 PF_ACPY(&key.addr[0], naddr, key.af);
2494 key.port[1] = dport;
2495
2496 /*
2497 * port search: start at a random port and step through the
2498 * range, similar to the port selection loop in in_pcbbind
2499 */
2500 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
2501 proto == IPPROTO_ICMP)) {
2502 key.port[0] = dport;
2503 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
2504 return (0);
2505 } else if (low == 0 && high == 0) {
2506 key.port[0] =
*nport; 2507 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) 2508 return (0); 2509 } else if (low == high) { 2510 key.port[0] = htons(low); 2511 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2512 *nport = htons(low); 2513 return (0); 2514 } 2515 } else { 2516 u_int16_t tmp; 2517 2518 if (low > high) { 2519 tmp = low; 2520 low = high; 2521 high = tmp; 2522 } 2523 /* low < high */ 2524 cut = htonl(karc4random()) % (1 + high - low) + low; 2525 /* low <= cut <= high */ 2526 for (tmp = cut; tmp <= high; ++(tmp)) { 2527 key.port[0] = htons(tmp); 2528 if (pf_find_state_all(&key, PF_IN, NULL) == 2529 NULL && !in_baddynamic(tmp, proto)) { 2530 *nport = htons(tmp); 2531 return (0); 2532 } 2533 } 2534 for (tmp = cut - 1; tmp >= low; --(tmp)) { 2535 key.port[0] = htons(tmp); 2536 if (pf_find_state_all(&key, PF_IN, NULL) == 2537 NULL && !in_baddynamic(tmp, proto)) { 2538 *nport = htons(tmp); 2539 return (0); 2540 } 2541 } 2542 } 2543 2544 switch (r->rpool.opts & PF_POOL_TYPEMASK) { 2545 case PF_POOL_RANDOM: 2546 case PF_POOL_ROUNDROBIN: 2547 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2548 return (1); 2549 break; 2550 case PF_POOL_NONE: 2551 case PF_POOL_SRCHASH: 2552 case PF_POOL_BITMASK: 2553 default: 2554 return (1); 2555 } 2556 } while (! PF_AEQ(&init_addr, naddr, af) ); 2557 return (1); /* none available */ 2558 } 2559 2560 struct pf_rule * 2561 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off, 2562 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport, 2563 struct pf_addr *daddr, u_int16_t dport, int rs_num) 2564 { 2565 struct pf_rule *r, *rm = NULL; 2566 struct pf_ruleset *ruleset = NULL; 2567 int tag = -1; 2568 int rtableid = -1; 2569 int asd = 0; 2570 2571 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr); 2572 while (r && rm == NULL) { 2573 struct pf_rule_addr *src = NULL, *dst = NULL; 2574 struct pf_addr_wrap *xdst = NULL; 2575 2576 if (r->action == PF_BINAT && direction == PF_IN) { 2577 src = &r->dst; 2578 if (r->rpool.cur != NULL) 2579 xdst = &r->rpool.cur->addr; 2580 } else { 2581 src = &r->src; 2582 dst = &r->dst; 2583 } 2584 2585 r->evaluations++; 2586 if (pfi_kif_match(r->kif, kif) == r->ifnot) 2587 r = r->skip[PF_SKIP_IFP].ptr; 2588 else if (r->direction && r->direction != direction) 2589 r = r->skip[PF_SKIP_DIR].ptr; 2590 else if (r->af && r->af != pd->af) 2591 r = r->skip[PF_SKIP_AF].ptr; 2592 else if (r->proto && r->proto != pd->proto) 2593 r = r->skip[PF_SKIP_PROTO].ptr; 2594 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af, 2595 src->neg, kif)) 2596 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR : 2597 PF_SKIP_DST_ADDR].ptr; 2598 else if (src->port_op && !pf_match_port(src->port_op, 2599 src->port[0], src->port[1], sport)) 2600 r = r->skip[src == &r->src ? 
PF_SKIP_SRC_PORT : 2601 PF_SKIP_DST_PORT].ptr; 2602 else if (dst != NULL && 2603 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL)) 2604 r = r->skip[PF_SKIP_DST_ADDR].ptr; 2605 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af, 2606 0, NULL)) 2607 r = TAILQ_NEXT(r, entries); 2608 else if (dst != NULL && dst->port_op && 2609 !pf_match_port(dst->port_op, dst->port[0], 2610 dst->port[1], dport)) 2611 r = r->skip[PF_SKIP_DST_PORT].ptr; 2612 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 2613 r = TAILQ_NEXT(r, entries); 2614 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto != 2615 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m, 2616 off, pd->hdr.tcp), r->os_fingerprint))) 2617 r = TAILQ_NEXT(r, entries); 2618 else { 2619 if (r->tag) 2620 tag = r->tag; 2621 if (r->rtableid >= 0) 2622 rtableid = r->rtableid; 2623 if (r->anchor == NULL) { 2624 rm = r; 2625 } else 2626 pf_step_into_anchor(&asd, &ruleset, rs_num, 2627 &r, NULL, NULL); 2628 } 2629 if (r == NULL) 2630 pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r, 2631 NULL, NULL); 2632 } 2633 if (pf_tag_packet(m, tag, rtableid)) 2634 return (NULL); 2635 if (rm != NULL && (rm->action == PF_NONAT || 2636 rm->action == PF_NORDR || rm->action == PF_NOBINAT)) 2637 return (NULL); 2638 return (rm); 2639 } 2640 2641 struct pf_rule * 2642 pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction, 2643 struct pfi_kif *kif, struct pf_src_node **sn, 2644 struct pf_state_key **skw, struct pf_state_key **sks, 2645 struct pf_state_key **skp, struct pf_state_key **nkp, 2646 struct pf_addr *saddr, struct pf_addr *daddr, 2647 u_int16_t sport, u_int16_t dport) 2648 { 2649 struct pf_rule *r = NULL; 2650 2651 2652 if (direction == PF_OUT) { 2653 r = pf_match_translation(pd, m, off, direction, kif, saddr, 2654 sport, daddr, dport, PF_RULESET_BINAT); 2655 if (r == NULL) 2656 r = pf_match_translation(pd, m, off, direction, kif, 2657 saddr, sport, daddr, dport, PF_RULESET_NAT); 2658 } else { 2659 r = pf_match_translation(pd, m, off, direction, kif, saddr, 2660 sport, daddr, dport, PF_RULESET_RDR); 2661 if (r == NULL) 2662 r = pf_match_translation(pd, m, off, direction, kif, 2663 saddr, sport, daddr, dport, PF_RULESET_BINAT); 2664 } 2665 2666 if (r != NULL) { 2667 struct pf_addr *naddr; 2668 u_int16_t *nport; 2669 2670 if (pf_state_key_setup(pd, r, skw, sks, skp, nkp, 2671 saddr, daddr, sport, dport)) 2672 return r; 2673 2674 /* XXX We only modify one side for now. */ 2675 naddr = &(*nkp)->addr[1]; 2676 nport = &(*nkp)->port[1]; 2677 2678 /* 2679 * NOTE: Currently all translations will clear 2680 * BRIDGE_MBUF_TAGGED, telling the bridge to 2681 * ignore the original input encapsulation. 
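*
* The action switch below then applies the rewrite: PF_NAT picks a
* replacement source address and proxy port via pf_get_sport(),
* PF_BINAT applies its static netmask substitution in either
* direction, and PF_RDR rewrites the destination, optionally
* spreading a destination port range across the rule's proxy port
* range.  As a purely hypothetical example (not from any ruleset in
* this file), "rdr ... port 8000:8999 -> 10.0.0.1 port 5000:*"
* would send dport 8042 to proxy port 5042 through the modulo
* arithmetic in the PF_RDR case.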
2682 */ 2683 switch (r->action) { 2684 case PF_NONAT: 2685 case PF_NOBINAT: 2686 case PF_NORDR: 2687 return (NULL); 2688 case PF_NAT: 2689 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2690 if (pf_get_sport(pd->af, pd->proto, r, saddr, 2691 daddr, dport, naddr, nport, r->rpool.proxy_port[0], 2692 r->rpool.proxy_port[1], sn)) { 2693 DPFPRINTF(PF_DEBUG_MISC, 2694 ("pf: NAT proxy port allocation " 2695 "(%u-%u) failed\n", 2696 r->rpool.proxy_port[0], 2697 r->rpool.proxy_port[1])); 2698 return (NULL); 2699 } 2700 break; 2701 case PF_BINAT: 2702 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2703 switch (direction) { 2704 case PF_OUT: 2705 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){ 2706 switch (pd->af) { 2707 #ifdef INET 2708 case AF_INET: 2709 if (r->rpool.cur->addr.p.dyn-> 2710 pfid_acnt4 < 1) 2711 return (NULL); 2712 PF_POOLMASK(naddr, 2713 &r->rpool.cur->addr.p.dyn-> 2714 pfid_addr4, 2715 &r->rpool.cur->addr.p.dyn-> 2716 pfid_mask4, 2717 saddr, AF_INET); 2718 break; 2719 #endif /* INET */ 2720 #ifdef INET6 2721 case AF_INET6: 2722 if (r->rpool.cur->addr.p.dyn-> 2723 pfid_acnt6 < 1) 2724 return (NULL); 2725 PF_POOLMASK(naddr, 2726 &r->rpool.cur->addr.p.dyn-> 2727 pfid_addr6, 2728 &r->rpool.cur->addr.p.dyn-> 2729 pfid_mask6, 2730 saddr, AF_INET6); 2731 break; 2732 #endif /* INET6 */ 2733 } 2734 } else 2735 PF_POOLMASK(naddr, 2736 &r->rpool.cur->addr.v.a.addr, 2737 &r->rpool.cur->addr.v.a.mask, 2738 saddr, pd->af); 2739 break; 2740 case PF_IN: 2741 if (r->src.addr.type == PF_ADDR_DYNIFTL) { 2742 switch (pd->af) { 2743 #ifdef INET 2744 case AF_INET: 2745 if (r->src.addr.p.dyn-> 2746 pfid_acnt4 < 1) 2747 return (NULL); 2748 PF_POOLMASK(naddr, 2749 &r->src.addr.p.dyn-> 2750 pfid_addr4, 2751 &r->src.addr.p.dyn-> 2752 pfid_mask4, 2753 daddr, AF_INET); 2754 break; 2755 #endif /* INET */ 2756 #ifdef INET6 2757 case AF_INET6: 2758 if (r->src.addr.p.dyn-> 2759 pfid_acnt6 < 1) 2760 return (NULL); 2761 PF_POOLMASK(naddr, 2762 &r->src.addr.p.dyn-> 2763 pfid_addr6, 2764 &r->src.addr.p.dyn-> 2765 pfid_mask6, 2766 daddr, AF_INET6); 2767 break; 2768 #endif /* INET6 */ 2769 } 2770 } else 2771 PF_POOLMASK(naddr, 2772 &r->src.addr.v.a.addr, 2773 &r->src.addr.v.a.mask, daddr, 2774 pd->af); 2775 break; 2776 } 2777 break; 2778 case PF_RDR: { 2779 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2780 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn)) 2781 return (NULL); 2782 if ((r->rpool.opts & PF_POOL_TYPEMASK) == 2783 PF_POOL_BITMASK) 2784 PF_POOLMASK(naddr, naddr, 2785 &r->rpool.cur->addr.v.a.mask, daddr, 2786 pd->af); 2787 2788 if (r->rpool.proxy_port[1]) { 2789 u_int32_t tmp_nport; 2790 2791 tmp_nport = ((ntohs(dport) - 2792 ntohs(r->dst.port[0])) % 2793 (r->rpool.proxy_port[1] - 2794 r->rpool.proxy_port[0] + 1)) + 2795 r->rpool.proxy_port[0]; 2796 2797 /* wrap around if necessary */ 2798 if (tmp_nport > 65535) 2799 tmp_nport -= 65535; 2800 *nport = htons((u_int16_t)tmp_nport); 2801 } else if (r->rpool.proxy_port[0]) 2802 *nport = htons(r->rpool.proxy_port[0]); 2803 break; 2804 } 2805 default: 2806 return (NULL); 2807 } 2808 } 2809 2810 return (r); 2811 } 2812 2813 #ifdef SMP 2814 struct netmsg_hashlookup { 2815 struct netmsg_base base; 2816 struct inpcb **nm_pinp; 2817 struct inpcbinfo *nm_pcbinfo; 2818 struct pf_addr *nm_saddr; 2819 struct pf_addr *nm_daddr; 2820 uint16_t nm_sport; 2821 uint16_t nm_dport; 2822 sa_family_t nm_af; 2823 }; 2824 2825 #ifdef PF_SOCKET_LOOKUP_DOMSG 2826 static void 2827 in_pcblookup_hash_handler(netmsg_t msg) 2828 { 2829 struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup 
*)msg; 2830 2831 if (rmsg->nm_af == AF_INET) 2832 *rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo, 2833 rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4, 2834 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 2835 #ifdef INET6 2836 else 2837 *rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo, 2838 &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6, 2839 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 2840 #endif /* INET6 */ 2841 lwkt_replymsg(&rmsg->base.lmsg, 0); 2842 } 2843 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 2844 2845 #endif /* SMP */ 2846 2847 int 2848 pf_socket_lookup(int direction, struct pf_pdesc *pd) 2849 { 2850 struct pf_addr *saddr, *daddr; 2851 u_int16_t sport, dport; 2852 struct inpcbinfo *pi; 2853 struct inpcb *inp; 2854 #ifdef SMP 2855 struct netmsg_hashlookup *msg = NULL; 2856 #ifdef PF_SOCKET_LOOKUP_DOMSG 2857 struct netmsg_hashlookup msg0; 2858 #endif 2859 #endif 2860 int pi_cpu = 0; 2861 2862 if (pd == NULL) 2863 return (-1); 2864 pd->lookup.uid = UID_MAX; 2865 pd->lookup.gid = GID_MAX; 2866 pd->lookup.pid = NO_PID; 2867 if (direction == PF_IN) { 2868 saddr = pd->src; 2869 daddr = pd->dst; 2870 } else { 2871 saddr = pd->dst; 2872 daddr = pd->src; 2873 } 2874 switch (pd->proto) { 2875 case IPPROTO_TCP: 2876 if (pd->hdr.tcp == NULL) 2877 return (-1); 2878 sport = pd->hdr.tcp->th_sport; 2879 dport = pd->hdr.tcp->th_dport; 2880 2881 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport); 2882 pi = &tcbinfo[pi_cpu]; 2883 #ifdef SMP 2884 /* 2885 * Our netstack runs lockless on MP systems 2886 * (only for TCP connections at the moment). 2887 * 2888 * As we are not allowed to read another CPU's tcbinfo, 2889 * we have to ask that CPU via remote call to search the 2890 * table for us. 2891 * 2892 * Prepare a msg iff data belongs to another CPU. 2893 */ 2894 if (pi_cpu != mycpu->gd_cpuid) { 2895 #ifdef PF_SOCKET_LOOKUP_DOMSG 2896 /* 2897 * NOTE: 2898 * 2899 * Following lwkt_domsg() is dangerous and could 2900 * lockup the network system, e.g. 2901 * 2902 * On 2 CPU system: 2903 * netisr0 domsg to netisr1 (due to lookup) 2904 * netisr1 domsg to netisr0 (due to lookup) 2905 * 2906 * We simply return -1 here, since we are probably 2907 * called before NAT, so the TCP packet should 2908 * already be on the correct CPU. 2909 */ 2910 msg = &msg0; 2911 netmsg_init(&msg->base, NULL, &curthread->td_msgport, 2912 0, in_pcblookup_hash_handler); 2913 msg->nm_pinp = &inp; 2914 msg->nm_pcbinfo = pi; 2915 msg->nm_saddr = saddr; 2916 msg->nm_sport = sport; 2917 msg->nm_daddr = daddr; 2918 msg->nm_dport = dport; 2919 msg->nm_af = pd->af; 2920 #else /* !PF_SOCKET_LOOKUP_DOMSG */ 2921 kprintf("pf_socket_lookup: tcp packet not on the " 2922 "correct cpu %d, cur cpu %d\n", 2923 pi_cpu, mycpuid); 2924 print_backtrace(-1); 2925 return -1; 2926 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 2927 } 2928 #endif /* SMP */ 2929 break; 2930 case IPPROTO_UDP: 2931 if (pd->hdr.udp == NULL) 2932 return (-1); 2933 sport = pd->hdr.udp->uh_sport; 2934 dport = pd->hdr.udp->uh_dport; 2935 pi = &udbinfo; 2936 break; 2937 default: 2938 return (-1); 2939 } 2940 if (direction != PF_IN) { 2941 u_int16_t p; 2942 2943 p = sport; 2944 sport = dport; 2945 dport = p; 2946 } 2947 switch (pd->af) { 2948 #ifdef INET6 2949 case AF_INET6: 2950 #ifdef SMP 2951 /* 2952 * Query other CPU, second part 2953 * 2954 * msg only gets initialized when: 2955 * 1) packet is TCP 2956 * 2) the info belongs to another CPU 2957 * 2958 * Use some switch/case magic to avoid code duplication. 
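*
* Concretely: for AF_INET6 with msg initialized, control falls
* through into the AF_INET case, whose lwkt_domsg() runs
* in_pcblookup_hash_handler() on the CPU that owns the PCB table;
* the handler then selects the IPv4 or IPv6 lookup based on nm_af.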
2959 */ 2960 if (msg == NULL) 2961 #endif /* SMP */ 2962 { 2963 inp = in6_pcblookup_hash(pi, &saddr->v6, sport, 2964 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL); 2965 2966 if (inp == NULL) 2967 return (-1); 2968 break; 2969 } 2970 /* FALLTHROUGH if SMP and on other CPU */ 2971 #endif /* INET6 */ 2972 case AF_INET: 2973 #ifdef SMP 2974 if (msg != NULL) { 2975 lwkt_domsg(cpu_portfn(pi_cpu), 2976 &msg->base.lmsg, 0); 2977 } else 2978 #endif /* SMP */ 2979 { 2980 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4, 2981 dport, INPLOOKUP_WILDCARD, NULL); 2982 } 2983 if (inp == NULL) 2984 return (-1); 2985 break; 2986 2987 default: 2988 return (-1); 2989 } 2990 pd->lookup.uid = inp->inp_socket->so_cred->cr_uid; 2991 pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0]; 2992 return (1); 2993 } 2994 2995 u_int8_t 2996 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 2997 { 2998 int hlen; 2999 u_int8_t hdr[60]; 3000 u_int8_t *opt, optlen; 3001 u_int8_t wscale = 0; 3002 3003 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3004 if (hlen <= sizeof(struct tcphdr)) 3005 return (0); 3006 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3007 return (0); 3008 opt = hdr + sizeof(struct tcphdr); 3009 hlen -= sizeof(struct tcphdr); 3010 while (hlen >= 3) { 3011 switch (*opt) { 3012 case TCPOPT_EOL: 3013 case TCPOPT_NOP: 3014 ++opt; 3015 --hlen; 3016 break; 3017 case TCPOPT_WINDOW: 3018 wscale = opt[2]; 3019 if (wscale > TCP_MAX_WINSHIFT) 3020 wscale = TCP_MAX_WINSHIFT; 3021 wscale |= PF_WSCALE_FLAG; 3022 /* FALLTHROUGH */ 3023 default: 3024 optlen = opt[1]; 3025 if (optlen < 2) 3026 optlen = 2; 3027 hlen -= optlen; 3028 opt += optlen; 3029 break; 3030 } 3031 } 3032 return (wscale); 3033 } 3034 3035 u_int16_t 3036 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3037 { 3038 int hlen; 3039 u_int8_t hdr[60]; 3040 u_int8_t *opt, optlen; 3041 u_int16_t mss = tcp_mssdflt; 3042 3043 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3044 if (hlen <= sizeof(struct tcphdr)) 3045 return (0); 3046 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3047 return (0); 3048 opt = hdr + sizeof(struct tcphdr); 3049 hlen -= sizeof(struct tcphdr); 3050 while (hlen >= TCPOLEN_MAXSEG) { 3051 switch (*opt) { 3052 case TCPOPT_EOL: 3053 case TCPOPT_NOP: 3054 ++opt; 3055 --hlen; 3056 break; 3057 case TCPOPT_MAXSEG: 3058 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); 3059 /* FALLTHROUGH */ 3060 default: 3061 optlen = opt[1]; 3062 if (optlen < 2) 3063 optlen = 2; 3064 hlen -= optlen; 3065 opt += optlen; 3066 break; 3067 } 3068 } 3069 return (mss); 3070 } 3071 3072 u_int16_t 3073 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) 3074 { 3075 #ifdef INET 3076 struct sockaddr_in *dst; 3077 struct route ro; 3078 #endif /* INET */ 3079 #ifdef INET6 3080 struct sockaddr_in6 *dst6; 3081 struct route_in6 ro6; 3082 #endif /* INET6 */ 3083 struct rtentry *rt = NULL; 3084 int hlen = 0; 3085 u_int16_t mss = tcp_mssdflt; 3086 3087 switch (af) { 3088 #ifdef INET 3089 case AF_INET: 3090 hlen = sizeof(struct ip); 3091 bzero(&ro, sizeof(ro)); 3092 dst = (struct sockaddr_in *)&ro.ro_dst; 3093 dst->sin_family = AF_INET; 3094 dst->sin_len = sizeof(*dst); 3095 dst->sin_addr = addr->v4; 3096 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING)); 3097 rt = ro.ro_rt; 3098 break; 3099 #endif /* INET */ 3100 #ifdef INET6 3101 case AF_INET6: 3102 hlen = sizeof(struct ip6_hdr); 3103 bzero(&ro6, sizeof(ro6)); 3104 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst; 3105 dst6->sin6_family = AF_INET6; 3106 
dst6->sin6_len = sizeof(*dst6); 3107 dst6->sin6_addr = addr->v6; 3108 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING)); 3109 rt = ro6.ro_rt; 3110 break; 3111 #endif /* INET6 */ 3112 } 3113 3114 if (rt && rt->rt_ifp) { 3115 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr); 3116 mss = max(tcp_mssdflt, mss); 3117 RTFREE(rt); 3118 } 3119 mss = min(mss, offer); 3120 mss = max(mss, 64); /* sanity - at least max opt space */ 3121 return (mss); 3122 } 3123 3124 void 3125 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr) 3126 { 3127 struct pf_rule *r = s->rule.ptr; 3128 3129 s->rt_kif = NULL; 3130 if (!r->rt || r->rt == PF_FASTROUTE) 3131 return; 3132 switch (s->key[PF_SK_WIRE]->af) { 3133 #ifdef INET 3134 case AF_INET: 3135 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, 3136 &s->nat_src_node); 3137 s->rt_kif = r->rpool.cur->kif; 3138 break; 3139 #endif /* INET */ 3140 #ifdef INET6 3141 case AF_INET6: 3142 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, 3143 &s->nat_src_node); 3144 s->rt_kif = r->rpool.cur->kif; 3145 break; 3146 #endif /* INET6 */ 3147 } 3148 } 3149 3150 u_int32_t 3151 pf_tcp_iss(struct pf_pdesc *pd) 3152 { 3153 MD5_CTX ctx; 3154 u_int32_t digest[4]; 3155 3156 if (pf_tcp_secret_init == 0) { 3157 karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret)); 3158 MD5Init(&pf_tcp_secret_ctx); 3159 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret, 3160 sizeof(pf_tcp_secret)); 3161 pf_tcp_secret_init = 1; 3162 } 3163 ctx = pf_tcp_secret_ctx; 3164 3165 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short)); 3166 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short)); 3167 if (pd->af == AF_INET6) { 3168 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr)); 3169 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr)); 3170 } else { 3171 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr)); 3172 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr)); 3173 } 3174 MD5Final((u_char *)digest, &ctx); 3175 pf_tcp_iss_off += 4096; 3176 return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off); 3177 } 3178 3179 int 3180 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, 3181 struct pfi_kif *kif, struct mbuf *m, int off, void *h, 3182 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm, 3183 struct ifqueue *ifq, struct inpcb *inp) 3184 { 3185 struct pf_rule *nr = NULL; 3186 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 3187 sa_family_t af = pd->af; 3188 struct pf_rule *r, *a = NULL; 3189 struct pf_ruleset *ruleset = NULL; 3190 struct pf_src_node *nsn = NULL; 3191 struct tcphdr *th = pd->hdr.tcp; 3192 struct pf_state_key *skw = NULL, *sks = NULL; 3193 struct pf_state_key *sk = NULL, *nk = NULL; 3194 u_short reason; 3195 int rewrite = 0, hdrlen = 0; 3196 int tag = -1, rtableid = -1; 3197 int asd = 0; 3198 int match = 0; 3199 int state_icmp = 0; 3200 u_int16_t sport = 0, dport = 0; 3201 u_int16_t nport = 0, bport = 0; 3202 u_int16_t bproto_sum = 0, bip_sum = 0; 3203 u_int8_t icmptype = 0, icmpcode = 0; 3204 3205 3206 if (direction == PF_IN && pf_check_congestion(ifq)) { 3207 REASON_SET(&reason, PFRES_CONGEST); 3208 return (PF_DROP); 3209 } 3210 3211 if (inp != NULL) 3212 pd->lookup.done = pf_socket_lookup(direction, pd); 3213 else if (debug_pfugidhack) { 3214 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n")); 3215 pd->lookup.done = pf_socket_lookup(direction, pd); 3216 } 3217 3218 switch (pd->proto) { 3219 case IPPROTO_TCP: 3220 sport = th->th_sport; 3221 dport = th->th_dport; 3222 hdrlen = 
sizeof(*th); 3223 break; 3224 case IPPROTO_UDP: 3225 sport = pd->hdr.udp->uh_sport; 3226 dport = pd->hdr.udp->uh_dport; 3227 hdrlen = sizeof(*pd->hdr.udp); 3228 break; 3229 #ifdef INET 3230 case IPPROTO_ICMP: 3231 if (pd->af != AF_INET) 3232 break; 3233 sport = dport = pd->hdr.icmp->icmp_id; 3234 hdrlen = sizeof(*pd->hdr.icmp); 3235 icmptype = pd->hdr.icmp->icmp_type; 3236 icmpcode = pd->hdr.icmp->icmp_code; 3237 3238 if (icmptype == ICMP_UNREACH || 3239 icmptype == ICMP_SOURCEQUENCH || 3240 icmptype == ICMP_REDIRECT || 3241 icmptype == ICMP_TIMXCEED || 3242 icmptype == ICMP_PARAMPROB) 3243 state_icmp++; 3244 break; 3245 #endif /* INET */ 3246 #ifdef INET6 3247 case IPPROTO_ICMPV6: 3248 if (af != AF_INET6) 3249 break; 3250 sport = dport = pd->hdr.icmp6->icmp6_id; 3251 hdrlen = sizeof(*pd->hdr.icmp6); 3252 icmptype = pd->hdr.icmp6->icmp6_type; 3253 icmpcode = pd->hdr.icmp6->icmp6_code; 3254 3255 if (icmptype == ICMP6_DST_UNREACH || 3256 icmptype == ICMP6_PACKET_TOO_BIG || 3257 icmptype == ICMP6_TIME_EXCEEDED || 3258 icmptype == ICMP6_PARAM_PROB) 3259 state_icmp++; 3260 break; 3261 #endif /* INET6 */ 3262 default: 3263 sport = dport = hdrlen = 0; 3264 break; 3265 } 3266 3267 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3268 3269 bport = nport = sport; 3270 /* check packet for BINAT/NAT/RDR */ 3271 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, 3272 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) { 3273 if (nk == NULL || sk == NULL) { 3274 REASON_SET(&reason, PFRES_MEMORY); 3275 goto cleanup; 3276 } 3277 3278 if (pd->ip_sum) 3279 bip_sum = *pd->ip_sum; 3280 3281 switch (pd->proto) { 3282 case IPPROTO_TCP: 3283 bproto_sum = th->th_sum; 3284 pd->proto_sum = &th->th_sum; 3285 3286 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3287 nk->port[pd->sidx] != sport) { 3288 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 3289 &th->th_sum, &nk->addr[pd->sidx], 3290 nk->port[pd->sidx], 0, af); 3291 pd->sport = &th->th_sport; 3292 sport = th->th_sport; 3293 } 3294 3295 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3296 nk->port[pd->didx] != dport) { 3297 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 3298 &th->th_sum, &nk->addr[pd->didx], 3299 nk->port[pd->didx], 0, af); 3300 dport = th->th_dport; 3301 pd->dport = &th->th_dport; 3302 } 3303 rewrite++; 3304 break; 3305 case IPPROTO_UDP: 3306 bproto_sum = pd->hdr.udp->uh_sum; 3307 pd->proto_sum = &pd->hdr.udp->uh_sum; 3308 3309 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3310 nk->port[pd->sidx] != sport) { 3311 pf_change_ap(saddr, &pd->hdr.udp->uh_sport, 3312 pd->ip_sum, &pd->hdr.udp->uh_sum, 3313 &nk->addr[pd->sidx], 3314 nk->port[pd->sidx], 1, af); 3315 sport = pd->hdr.udp->uh_sport; 3316 pd->sport = &pd->hdr.udp->uh_sport; 3317 } 3318 3319 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3320 nk->port[pd->didx] != dport) { 3321 pf_change_ap(daddr, &pd->hdr.udp->uh_dport, 3322 pd->ip_sum, &pd->hdr.udp->uh_sum, 3323 &nk->addr[pd->didx], 3324 nk->port[pd->didx], 1, af); 3325 dport = pd->hdr.udp->uh_dport; 3326 pd->dport = &pd->hdr.udp->uh_dport; 3327 } 3328 rewrite++; 3329 break; 3330 #ifdef INET 3331 case IPPROTO_ICMP: 3332 nk->port[0] = nk->port[1]; 3333 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET)) 3334 pf_change_a(&saddr->v4.s_addr, pd->ip_sum, 3335 nk->addr[pd->sidx].v4.s_addr, 0); 3336 3337 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET)) 3338 pf_change_a(&daddr->v4.s_addr, pd->ip_sum, 3339 nk->addr[pd->didx].v4.s_addr, 0); 3340 3341 if (nk->port[1] != pd->hdr.icmp->icmp_id) { 3342 
pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3343 pd->hdr.icmp->icmp_cksum, sport,
3344 nk->port[1], 0);
3345 pd->hdr.icmp->icmp_id = nk->port[1];
3346 pd->sport = &pd->hdr.icmp->icmp_id;
3347 }
3348 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3349 break;
3350 #endif /* INET */
3351 #ifdef INET6
3352 case IPPROTO_ICMPV6:
3353 nk->port[0] = nk->port[1];
3354 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3355 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3356 &nk->addr[pd->sidx], 0);
3357
3358 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3359 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3360 &nk->addr[pd->didx], 0);
3361 rewrite++;
3362 break;
3363 #endif /* INET6 */
3364 default:
3365 switch (af) {
3366 #ifdef INET
3367 case AF_INET:
3368 if (PF_ANEQ(saddr,
3369 &nk->addr[pd->sidx], AF_INET))
3370 pf_change_a(&saddr->v4.s_addr,
3371 pd->ip_sum,
3372 nk->addr[pd->sidx].v4.s_addr, 0);
3373
3374 if (PF_ANEQ(daddr,
3375 &nk->addr[pd->didx], AF_INET))
3376 pf_change_a(&daddr->v4.s_addr,
3377 pd->ip_sum,
3378 nk->addr[pd->didx].v4.s_addr, 0);
3379 break;
3380 #endif /* INET */
3381 #ifdef INET6
3382 case AF_INET6:
3383 if (PF_ANEQ(saddr,
3384 &nk->addr[pd->sidx], AF_INET6))
3385 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3386
3387 if (PF_ANEQ(daddr,
3388 &nk->addr[pd->didx], AF_INET6))
3389 PF_ACPY(daddr, &nk->addr[pd->didx], af);
3390 break;
3391 #endif /* INET6 */
3392 }
3393 break;
3394 }
3395 if (nr->natpass)
3396 r = NULL;
3397 pd->nat_rule = nr;
3398 }
3399
3400 while (r != NULL) {
3401 r->evaluations++;
3402 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3403 r = r->skip[PF_SKIP_IFP].ptr;
3404 else if (r->direction && r->direction != direction)
3405 r = r->skip[PF_SKIP_DIR].ptr;
3406 else if (r->af && r->af != af)
3407 r = r->skip[PF_SKIP_AF].ptr;
3408 else if (r->proto && r->proto != pd->proto)
3409 r = r->skip[PF_SKIP_PROTO].ptr;
3410 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3411 r->src.neg, kif))
3412 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3413 /* tcp/udp only. port_op always 0 in other cases */
3414 else if (r->src.port_op && !pf_match_port(r->src.port_op,
3415 r->src.port[0], r->src.port[1], sport))
3416 r = r->skip[PF_SKIP_SRC_PORT].ptr;
3417 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3418 r->dst.neg, NULL))
3419 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3420 /* tcp/udp only. port_op always 0 in other cases */
3421 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3422 r->dst.port[0], r->dst.port[1], dport))
3423 r = r->skip[PF_SKIP_DST_PORT].ptr;
3424 /* icmp only. type always 0 in other cases */
3425 else if (r->type && r->type != icmptype + 1)
3426 r = TAILQ_NEXT(r, entries);
3427 /* icmp only. code always 0 in other cases */
3428 else if (r->code && r->code != icmpcode + 1)
3429 r = TAILQ_NEXT(r, entries);
3430 else if (r->tos && !(r->tos == pd->tos))
3431 r = TAILQ_NEXT(r, entries);
3432 else if (r->rule_flag & PFRULE_FRAGMENT)
3433 r = TAILQ_NEXT(r, entries);
3434 else if (pd->proto == IPPROTO_TCP &&
3435 (r->flagset & th->th_flags) != r->flags)
3436 r = TAILQ_NEXT(r, entries);
3437 /* tcp/udp only. uid.op always 0 in other cases */
3438 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3439 pf_socket_lookup(direction, pd), 1)) &&
3440 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3441 pd->lookup.uid))
3442 r = TAILQ_NEXT(r, entries);
3443 /* tcp/udp only.
gid.op always 0 in other cases */ 3444 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done = 3445 pf_socket_lookup(direction, pd), 1)) && 3446 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], 3447 pd->lookup.gid)) 3448 r = TAILQ_NEXT(r, entries); 3449 else if (r->prob && 3450 r->prob <= karc4random()) 3451 r = TAILQ_NEXT(r, entries); 3452 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3453 r = TAILQ_NEXT(r, entries); 3454 else if (r->os_fingerprint != PF_OSFP_ANY && 3455 (pd->proto != IPPROTO_TCP || !pf_osfp_match( 3456 pf_osfp_fingerprint(pd, m, off, th), 3457 r->os_fingerprint))) 3458 r = TAILQ_NEXT(r, entries); 3459 else { 3460 if (r->tag) 3461 tag = r->tag; 3462 if (r->rtableid >= 0) 3463 rtableid = r->rtableid; 3464 if (r->anchor == NULL) { 3465 match = 1; 3466 *rm = r; 3467 *am = a; 3468 *rsm = ruleset; 3469 if ((*rm)->quick) 3470 break; 3471 r = TAILQ_NEXT(r, entries); 3472 } else 3473 pf_step_into_anchor(&asd, &ruleset, 3474 PF_RULESET_FILTER, &r, &a, &match); 3475 } 3476 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3477 PF_RULESET_FILTER, &r, &a, &match)) 3478 break; 3479 } 3480 r = *rm; 3481 a = *am; 3482 ruleset = *rsm; 3483 3484 REASON_SET(&reason, PFRES_MATCH); 3485 3486 if (r->log || (nr != NULL && nr->log)) { 3487 if (rewrite) 3488 m_copyback(m, off, hdrlen, pd->hdr.any); 3489 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr, 3490 a, ruleset, pd); 3491 } 3492 3493 if ((r->action == PF_DROP) && 3494 ((r->rule_flag & PFRULE_RETURNRST) || 3495 (r->rule_flag & PFRULE_RETURNICMP) || 3496 (r->rule_flag & PFRULE_RETURN))) { 3497 /* undo NAT changes, if they have taken place */ 3498 if (nr != NULL) { 3499 PF_ACPY(saddr, &sk->addr[pd->sidx], af); 3500 PF_ACPY(daddr, &sk->addr[pd->didx], af); 3501 if (pd->sport) 3502 *pd->sport = sk->port[pd->sidx]; 3503 if (pd->dport) 3504 *pd->dport = sk->port[pd->didx]; 3505 if (pd->proto_sum) 3506 *pd->proto_sum = bproto_sum; 3507 if (pd->ip_sum) 3508 *pd->ip_sum = bip_sum; 3509 m_copyback(m, off, hdrlen, pd->hdr.any); 3510 } 3511 if (pd->proto == IPPROTO_TCP && 3512 ((r->rule_flag & PFRULE_RETURNRST) || 3513 (r->rule_flag & PFRULE_RETURN)) && 3514 !(th->th_flags & TH_RST)) { 3515 u_int32_t ack = ntohl(th->th_seq) + pd->p_len; 3516 int len = 0; 3517 struct ip *h4; 3518 #ifdef INET6 3519 struct ip6_hdr *h6; 3520 #endif 3521 switch (af) { 3522 case AF_INET: 3523 h4 = mtod(m, struct ip *); 3524 len = h4->ip_len - off; 3525 break; 3526 #ifdef INET6 3527 case AF_INET6: 3528 h6 = mtod(m, struct ip6_hdr *); 3529 len = h6->ip6_plen - (off - sizeof(*h6)); 3530 break; 3531 #endif 3532 } 3533 3534 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af)) 3535 REASON_SET(&reason, PFRES_PROTCKSUM); 3536 else { 3537 if (th->th_flags & TH_SYN) 3538 ack++; 3539 if (th->th_flags & TH_FIN) 3540 ack++; 3541 pf_send_tcp(r, af, pd->dst, 3542 pd->src, th->th_dport, th->th_sport, 3543 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, 3544 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp); 3545 } 3546 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET && 3547 r->return_icmp) 3548 pf_send_icmp(m, r->return_icmp >> 8, 3549 r->return_icmp & 255, af, r); 3550 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && 3551 r->return_icmp6) 3552 pf_send_icmp(m, r->return_icmp6 >> 8, 3553 r->return_icmp6 & 255, af, r); 3554 } 3555 3556 if (r->action == PF_DROP) 3557 goto cleanup; 3558 3559 if (pf_tag_packet(m, tag, rtableid)) { 3560 REASON_SET(&reason, PFRES_MEMORY); 3561 goto cleanup; 3562 } 3563 3564 if (!state_icmp && (r->keep_state || nr != 
NULL || 3565 (pd->flags & PFDESC_TCP_NORM))) { 3566 int action; 3567 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m, 3568 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum, 3569 bip_sum, hdrlen); 3570 if (action != PF_PASS) 3571 return (action); 3572 } 3573 3574 /* copy back packet headers if we performed NAT operations */ 3575 if (rewrite) 3576 m_copyback(m, off, hdrlen, pd->hdr.any); 3577 3578 return (PF_PASS); 3579 3580 cleanup: 3581 if (sk != NULL) 3582 pool_put(&pf_state_key_pl, sk); 3583 if (nk != NULL) 3584 pool_put(&pf_state_key_pl, nk); 3585 return (PF_DROP); 3586 } 3587 3588 static __inline int 3589 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, 3590 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw, 3591 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk, 3592 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite, 3593 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum, 3594 u_int16_t bip_sum, int hdrlen) 3595 { 3596 struct pf_state *s = NULL; 3597 struct pf_src_node *sn = NULL; 3598 struct tcphdr *th = pd->hdr.tcp; 3599 u_int16_t mss = tcp_mssdflt; 3600 u_short reason; 3601 3602 /* check maximums */ 3603 if (r->max_states && (r->states_cur >= r->max_states)) { 3604 pf_status.lcounters[LCNT_STATES]++; 3605 REASON_SET(&reason, PFRES_MAXSTATES); 3606 return (PF_DROP); 3607 } 3608 /* src node for filter rule */ 3609 if ((r->rule_flag & PFRULE_SRCTRACK || 3610 r->rpool.opts & PF_POOL_STICKYADDR) && 3611 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) { 3612 REASON_SET(&reason, PFRES_SRCLIMIT); 3613 goto csfailed; 3614 } 3615 /* src node for translation rule */ 3616 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) && 3617 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) { 3618 REASON_SET(&reason, PFRES_SRCLIMIT); 3619 goto csfailed; 3620 } 3621 s = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO); 3622 if (s == NULL) { 3623 REASON_SET(&reason, PFRES_MEMORY); 3624 goto csfailed; 3625 } 3626 s->id = 0; /* XXX Do we really need that? 
not in OpenBSD */ 3627 s->creatorid = 0; 3628 s->rule.ptr = r; 3629 s->nat_rule.ptr = nr; 3630 s->anchor.ptr = a; 3631 STATE_INC_COUNTERS(s); 3632 if (r->allow_opts) 3633 s->state_flags |= PFSTATE_ALLOWOPTS; 3634 if (r->rule_flag & PFRULE_STATESLOPPY) 3635 s->state_flags |= PFSTATE_SLOPPY; 3636 s->log = r->log & PF_LOG_ALL; 3637 if (nr != NULL) 3638 s->log |= nr->log & PF_LOG_ALL; 3639 switch (pd->proto) { 3640 case IPPROTO_TCP: 3641 s->src.seqlo = ntohl(th->th_seq); 3642 s->src.seqhi = s->src.seqlo + pd->p_len + 1; 3643 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && 3644 r->keep_state == PF_STATE_MODULATE) { 3645 /* Generate sequence number modulator */ 3646 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 3647 0) 3648 s->src.seqdiff = 1; 3649 pf_change_a(&th->th_seq, &th->th_sum, 3650 htonl(s->src.seqlo + s->src.seqdiff), 0); 3651 *rewrite = 1; 3652 } else 3653 s->src.seqdiff = 0; 3654 if (th->th_flags & TH_SYN) { 3655 s->src.seqhi++; 3656 s->src.wscale = pf_get_wscale(m, off, 3657 th->th_off, pd->af); 3658 } 3659 s->src.max_win = MAX(ntohs(th->th_win), 1); 3660 if (s->src.wscale & PF_WSCALE_MASK) { 3661 /* Remove scale factor from initial window */ 3662 int win = s->src.max_win; 3663 win += 1 << (s->src.wscale & PF_WSCALE_MASK); 3664 s->src.max_win = (win - 1) >> 3665 (s->src.wscale & PF_WSCALE_MASK); 3666 } 3667 if (th->th_flags & TH_FIN) 3668 s->src.seqhi++; 3669 s->dst.seqhi = 1; 3670 s->dst.max_win = 1; 3671 s->src.state = TCPS_SYN_SENT; 3672 s->dst.state = TCPS_CLOSED; 3673 s->timeout = PFTM_TCP_FIRST_PACKET; 3674 break; 3675 case IPPROTO_UDP: 3676 s->src.state = PFUDPS_SINGLE; 3677 s->dst.state = PFUDPS_NO_TRAFFIC; 3678 s->timeout = PFTM_UDP_FIRST_PACKET; 3679 break; 3680 case IPPROTO_ICMP: 3681 #ifdef INET6 3682 case IPPROTO_ICMPV6: 3683 #endif 3684 s->timeout = PFTM_ICMP_FIRST_PACKET; 3685 break; 3686 default: 3687 s->src.state = PFOTHERS_SINGLE; 3688 s->dst.state = PFOTHERS_NO_TRAFFIC; 3689 s->timeout = PFTM_OTHER_FIRST_PACKET; 3690 } 3691 3692 s->creation = time_second; 3693 s->expire = time_second; 3694 3695 if (sn != NULL) { 3696 s->src_node = sn; 3697 s->src_node->states++; 3698 } 3699 if (nsn != NULL) { 3700 /* XXX We only modify one side for now. */ 3701 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af); 3702 s->nat_src_node = nsn; 3703 s->nat_src_node->states++; 3704 } 3705 if (pd->proto == IPPROTO_TCP) { 3706 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m, 3707 off, pd, th, &s->src, &s->dst)) { 3708 REASON_SET(&reason, PFRES_MEMORY); 3709 pf_src_tree_remove_state(s); 3710 STATE_DEC_COUNTERS(s); 3711 pool_put(&pf_state_pl, s); 3712 return (PF_DROP); 3713 } 3714 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && 3715 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s, 3716 &s->src, &s->dst, rewrite)) { 3717 /* This really shouldn't happen!!! 
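* pf_normalize_tcp_init() has just primed the scrub state from this
* very segment, so the stateful normalization pass is not expected
* to fail on the first packet.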
*/ 3718 DPFPRINTF(PF_DEBUG_URGENT, 3719 ("pf_normalize_tcp_stateful failed on first pkt")); 3720 pf_normalize_tcp_cleanup(s); 3721 pf_src_tree_remove_state(s); 3722 STATE_DEC_COUNTERS(s); 3723 pool_put(&pf_state_pl, s); 3724 return (PF_DROP); 3725 } 3726 } 3727 s->direction = pd->dir; 3728 3729 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk, 3730 pd->src, pd->dst, sport, dport)) 3731 goto csfailed; 3732 3733 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) { 3734 if (pd->proto == IPPROTO_TCP) 3735 pf_normalize_tcp_cleanup(s); 3736 REASON_SET(&reason, PFRES_STATEINS); 3737 pf_src_tree_remove_state(s); 3738 STATE_DEC_COUNTERS(s); 3739 pool_put(&pf_state_pl, s); 3740 return (PF_DROP); 3741 } else 3742 *sm = s; 3743 3744 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */ 3745 if (tag > 0) { 3746 pf_tag_ref(tag); 3747 s->tag = tag; 3748 } 3749 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == 3750 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { 3751 s->src.state = PF_TCPS_PROXY_SRC; 3752 /* undo NAT changes, if they have taken place */ 3753 if (nr != NULL) { 3754 struct pf_state_key *skt = s->key[PF_SK_WIRE]; 3755 if (pd->dir == PF_OUT) 3756 skt = s->key[PF_SK_STACK]; 3757 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); 3758 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); 3759 if (pd->sport) 3760 *pd->sport = skt->port[pd->sidx]; 3761 if (pd->dport) 3762 *pd->dport = skt->port[pd->didx]; 3763 if (pd->proto_sum) 3764 *pd->proto_sum = bproto_sum; 3765 if (pd->ip_sum) 3766 *pd->ip_sum = bip_sum; 3767 m_copyback(m, off, hdrlen, pd->hdr.any); 3768 } 3769 s->src.seqhi = htonl(karc4random()); 3770 /* Find mss option */ 3771 mss = pf_get_mss(m, off, th->th_off, pd->af); 3772 mss = pf_calc_mss(pd->src, pd->af, mss); 3773 mss = pf_calc_mss(pd->dst, pd->af, mss); 3774 s->src.mss = mss; 3775 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, 3776 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, 3777 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL); 3778 REASON_SET(&reason, PFRES_SYNPROXY); 3779 return (PF_SYNPROXY_DROP); 3780 } 3781 3782 return (PF_PASS); 3783 3784 csfailed: 3785 if (sk != NULL) 3786 pool_put(&pf_state_key_pl, sk); 3787 if (nk != NULL) 3788 pool_put(&pf_state_key_pl, nk); 3789 3790 if (sn != NULL && sn->states == 0 && sn->expire == 0) { 3791 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn); 3792 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 3793 pf_status.src_nodes--; 3794 pool_put(&pf_src_tree_pl, sn); 3795 } 3796 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) { 3797 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn); 3798 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 3799 pf_status.src_nodes--; 3800 pool_put(&pf_src_tree_pl, nsn); 3801 } 3802 return (PF_DROP); 3803 } 3804 3805 int 3806 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, 3807 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am, 3808 struct pf_ruleset **rsm) 3809 { 3810 struct pf_rule *r, *a = NULL; 3811 struct pf_ruleset *ruleset = NULL; 3812 sa_family_t af = pd->af; 3813 u_short reason; 3814 int tag = -1; 3815 int asd = 0; 3816 int match = 0; 3817 3818 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3819 while (r != NULL) { 3820 r->evaluations++; 3821 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3822 r = r->skip[PF_SKIP_IFP].ptr; 3823 else if (r->direction && r->direction != direction) 3824 r = r->skip[PF_SKIP_DIR].ptr; 3825 else if (r->af && r->af != af) 3826 r = r->skip[PF_SKIP_AF].ptr; 
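/*
 * The r->skip[] steps consulted by this chain are precomputed when
 * the ruleset is loaded: each points at the first rule that could
 * still match once the given criterion has failed, so runs of rules
 * sharing an interface, direction or address family are stepped
 * over in one hop instead of being evaluated individually.
 */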
3827 else if (r->proto && r->proto != pd->proto) 3828 r = r->skip[PF_SKIP_PROTO].ptr; 3829 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 3830 r->src.neg, kif)) 3831 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3832 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 3833 r->dst.neg, NULL)) 3834 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3835 else if (r->tos && !(r->tos == pd->tos)) 3836 r = TAILQ_NEXT(r, entries); 3837 else if (r->os_fingerprint != PF_OSFP_ANY) 3838 r = TAILQ_NEXT(r, entries); 3839 else if (pd->proto == IPPROTO_UDP && 3840 (r->src.port_op || r->dst.port_op)) 3841 r = TAILQ_NEXT(r, entries); 3842 else if (pd->proto == IPPROTO_TCP && 3843 (r->src.port_op || r->dst.port_op || r->flagset)) 3844 r = TAILQ_NEXT(r, entries); 3845 else if ((pd->proto == IPPROTO_ICMP || 3846 pd->proto == IPPROTO_ICMPV6) && 3847 (r->type || r->code)) 3848 r = TAILQ_NEXT(r, entries); 3849 else if (r->prob && r->prob <= karc4random()) 3850 r = TAILQ_NEXT(r, entries); 3851 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3852 r = TAILQ_NEXT(r, entries); 3853 else { 3854 if (r->anchor == NULL) { 3855 match = 1; 3856 *rm = r; 3857 *am = a; 3858 *rsm = ruleset; 3859 if ((*rm)->quick) 3860 break; 3861 r = TAILQ_NEXT(r, entries); 3862 } else 3863 pf_step_into_anchor(&asd, &ruleset, 3864 PF_RULESET_FILTER, &r, &a, &match); 3865 } 3866 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3867 PF_RULESET_FILTER, &r, &a, &match)) 3868 break; 3869 } 3870 r = *rm; 3871 a = *am; 3872 ruleset = *rsm; 3873 3874 REASON_SET(&reason, PFRES_MATCH); 3875 3876 if (r->log) 3877 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset, 3878 pd); 3879 3880 if (r->action != PF_PASS) 3881 return (PF_DROP); 3882 3883 if (pf_tag_packet(m, tag, -1)) { 3884 REASON_SET(&reason, PFRES_MEMORY); 3885 return (PF_DROP); 3886 } 3887 3888 return (PF_PASS); 3889 } 3890 3891 int 3892 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst, 3893 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off, 3894 struct pf_pdesc *pd, u_short *reason, int *copyback) 3895 { 3896 struct tcphdr *th = pd->hdr.tcp; 3897 u_int16_t win = ntohs(th->th_win); 3898 u_int32_t ack, end, seq, orig_seq; 3899 u_int8_t sws, dws; 3900 int ackskew; 3901 3902 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { 3903 sws = src->wscale & PF_WSCALE_MASK; 3904 dws = dst->wscale & PF_WSCALE_MASK; 3905 } else 3906 sws = dws = 0; 3907 3908 /* 3909 * Sequence tracking algorithm from Guido van Rooij's paper: 3910 * http://www.madison-gurkha.com/publications/tcp_filtering/ 3911 * tcp_filtering.ps 3912 */ 3913 3914 orig_seq = seq = ntohl(th->th_seq); 3915 if (src->seqlo == 0) { 3916 /* First packet from this end. 
Set its state */
3917
3918 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3919 src->scrub == NULL) {
3920 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3921 REASON_SET(reason, PFRES_MEMORY);
3922 return (PF_DROP);
3923 }
3924 }
3925
3926 /* Deferred generation of sequence number modulator */
3927 if (dst->seqdiff && !src->seqdiff) {
3928 /* use random iss for the TCP server */
3929 while ((src->seqdiff = karc4random() - seq) == 0)
3930 ;
3931 ack = ntohl(th->th_ack) - dst->seqdiff;
3932 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3933 src->seqdiff), 0);
3934 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3935 *copyback = 1;
3936 } else {
3937 ack = ntohl(th->th_ack);
3938 }
3939
3940 end = seq + pd->p_len;
3941 if (th->th_flags & TH_SYN) {
3942 end++;
3943 (*state)->sync_flags |= PFSTATE_GOT_SYN2;
3944 if (dst->wscale & PF_WSCALE_FLAG) {
3945 src->wscale = pf_get_wscale(m, off, th->th_off,
3946 pd->af);
3947 if (src->wscale & PF_WSCALE_FLAG) {
3948 /* Remove scale factor from initial
3949 * window */
3950 sws = src->wscale & PF_WSCALE_MASK;
3951 win = ((u_int32_t)win + (1 << sws) - 1)
3952 >> sws;
3953 dws = dst->wscale & PF_WSCALE_MASK;
3954 } else {
3955 /* fixup other window */
3956 dst->max_win <<= dst->wscale &
3957 PF_WSCALE_MASK;
3958 /* in case of a retrans SYN|ACK */
3959 dst->wscale = 0;
3960 }
3961 }
3962 }
3963 if (th->th_flags & TH_FIN)
3964 end++;
3965
3966 src->seqlo = seq;
3967 if (src->state < TCPS_SYN_SENT)
3968 src->state = TCPS_SYN_SENT;
3969
3970 /*
3971 * May need to slide the window (seqhi may have been set by
3972 * the crappy stack check or if we picked up the connection
3973 * after establishment)
3974 */
3975 if (src->seqhi == 1 ||
3976 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3977 src->seqhi = end + MAX(1, dst->max_win << dws);
3978 if (win > src->max_win)
3979 src->max_win = win;
3980
3981 } else {
3982 ack = ntohl(th->th_ack) - dst->seqdiff;
3983 if (src->seqdiff) {
3984 /* Modulate sequence numbers */
3985 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3986 src->seqdiff), 0);
3987 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3988 *copyback = 1;
3989 }
3990 end = seq + pd->p_len;
3991 if (th->th_flags & TH_SYN)
3992 end++;
3993 if (th->th_flags & TH_FIN)
3994 end++;
3995 }
3996
3997 if ((th->th_flags & TH_ACK) == 0) {
3998 /* Let it pass through the ack skew check */
3999 ack = dst->seqlo;
4000 } else if ((ack == 0 &&
4001 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4002 /* broken tcp stacks do not set ack */
4003 (dst->state < TCPS_SYN_SENT)) {
4004 /*
4005 * Many stacks (ours included) will set the ACK number in a
4006 * FIN|ACK if the SYN times out -- no sequence to ACK.
4007 */
4008 ack = dst->seqlo;
4009 }
4010
4011 if (seq == end) {
4012 /* Ease sequencing restrictions on no-data packets */
4013 seq = src->seqlo;
4014 end = seq;
4015 }
4016
4017 ackskew = dst->seqlo - ack;
4018
4019
4020 /*
4021 * Need to demodulate the sequence numbers in any TCP SACK options
4022 * (Selective ACK). We could optionally validate the SACK values
4023 * against the current ACK window, either forwards or backwards, but
4024 * I'm not confident that SACK has been implemented properly
4025 * everywhere. It wouldn't surprise me if several stacks accidentally
4026 * SACK too far backwards of previously ACKed data. There really aren't
4027 * any security implications of bad SACKing unless the target stack
4028 * doesn't validate the option length correctly.
Someone trying to 4029 * spoof into a TCP connection won't bother blindly sending SACK 4030 * options anyway. 4031 */ 4032 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { 4033 if (pf_modulate_sack(m, off, pd, th, dst)) 4034 *copyback = 1; 4035 } 4036 4037 4038 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 4039 if (SEQ_GEQ(src->seqhi, end) && 4040 /* Last octet inside other's window space */ 4041 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && 4042 /* Retrans: not more than one window back */ 4043 (ackskew >= -MAXACKWINDOW) && 4044 /* Acking not more than one reassembled fragment backwards */ 4045 (ackskew <= (MAXACKWINDOW << sws)) && 4046 /* Acking not more than one window forward */ 4047 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || 4048 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || 4049 (pd->flags & PFDESC_IP_REAS) == 0)) { 4050 /* Require an exact/+1 sequence match on resets when possible */ 4051 4052 if (dst->scrub || src->scrub) { 4053 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4054 *state, src, dst, copyback)) 4055 return (PF_DROP); 4056 } 4057 4058 /* update max window */ 4059 if (src->max_win < win) 4060 src->max_win = win; 4061 /* synchronize sequencing */ 4062 if (SEQ_GT(end, src->seqlo)) 4063 src->seqlo = end; 4064 /* slide the window of what the other end can send */ 4065 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4066 dst->seqhi = ack + MAX((win << sws), 1); 4067 4068 4069 /* update states */ 4070 if (th->th_flags & TH_SYN) 4071 if (src->state < TCPS_SYN_SENT) 4072 src->state = TCPS_SYN_SENT; 4073 if (th->th_flags & TH_FIN) 4074 if (src->state < TCPS_CLOSING) 4075 src->state = TCPS_CLOSING; 4076 if (th->th_flags & TH_ACK) { 4077 if (dst->state == TCPS_SYN_SENT) { 4078 dst->state = TCPS_ESTABLISHED; 4079 if (src->state == TCPS_ESTABLISHED && 4080 (*state)->src_node != NULL && 4081 pf_src_connlimit(state)) { 4082 REASON_SET(reason, PFRES_SRCLIMIT); 4083 return (PF_DROP); 4084 } 4085 } else if (dst->state == TCPS_CLOSING) 4086 dst->state = TCPS_FIN_WAIT_2; 4087 } 4088 if (th->th_flags & TH_RST) 4089 src->state = dst->state = TCPS_TIME_WAIT; 4090 4091 /* update expire time */ 4092 (*state)->expire = time_second; 4093 if (src->state >= TCPS_FIN_WAIT_2 && 4094 dst->state >= TCPS_FIN_WAIT_2) 4095 (*state)->timeout = PFTM_TCP_CLOSED; 4096 else if (src->state >= TCPS_CLOSING && 4097 dst->state >= TCPS_CLOSING) 4098 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4099 else if (src->state < TCPS_ESTABLISHED || 4100 dst->state < TCPS_ESTABLISHED) 4101 (*state)->timeout = PFTM_TCP_OPENING; 4102 else if (src->state >= TCPS_CLOSING || 4103 dst->state >= TCPS_CLOSING) 4104 (*state)->timeout = PFTM_TCP_CLOSING; 4105 else 4106 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4107 4108 /* Fall through to PASS packet */ 4109 4110 } else if ((dst->state < TCPS_SYN_SENT || 4111 dst->state >= TCPS_FIN_WAIT_2 || 4112 src->state >= TCPS_FIN_WAIT_2) && 4113 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 4114 /* Within a window forward of the originating packet */ 4115 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 4116 /* Within a window backward of the originating packet */ 4117 4118 /* 4119 * This currently handles three situations: 4120 * 1) Stupid stacks will shotgun SYNs before their peer 4121 * replies. 4122 * 2) When PF catches an already established stream (the 4123 * firewall rebooted, the state table was flushed, routes 4124 * changed...) 
4125 * 3) Packets get funky immediately after the connection 4126 * closes (this should catch Solaris spurious ACK|FINs 4127 * that web servers like to spew after a close) 4128 * 4129 * This must be a little more careful than the above code 4130 * since packet floods will also be caught here. We don't 4131 * update the TTL here to mitigate the damage of a packet 4132 * flood and so the same code can handle awkward establishment 4133 * and a loosened connection close. 4134 * In the establishment case, a correct peer response will 4135 * validate the connection, go through the normal state code 4136 * and keep updating the state TTL. 4137 */ 4138 4139 if (pf_status.debug >= PF_DEBUG_MISC) { 4140 kprintf("pf: loose state match: "); 4141 pf_print_state(*state); 4142 pf_print_flags(th->th_flags); 4143 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 4144 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len, 4145 ackskew, (unsigned long long)(*state)->packets[0], 4146 (unsigned long long)(*state)->packets[1], 4147 pd->dir == PF_IN ? "in" : "out", 4148 pd->dir == (*state)->direction ? "fwd" : "rev"); 4149 } 4150 4151 if (dst->scrub || src->scrub) { 4152 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4153 *state, src, dst, copyback)) 4154 return (PF_DROP); 4155 } 4156 4157 /* update max window */ 4158 if (src->max_win < win) 4159 src->max_win = win; 4160 /* synchronize sequencing */ 4161 if (SEQ_GT(end, src->seqlo)) 4162 src->seqlo = end; 4163 /* slide the window of what the other end can send */ 4164 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4165 dst->seqhi = ack + MAX((win << sws), 1); 4166 4167 /* 4168 * Cannot set dst->seqhi here since this could be a shotgunned 4169 * SYN and not an already established connection. 4170 */ 4171 4172 if (th->th_flags & TH_FIN) 4173 if (src->state < TCPS_CLOSING) 4174 src->state = TCPS_CLOSING; 4175 if (th->th_flags & TH_RST) 4176 src->state = dst->state = TCPS_TIME_WAIT; 4177 4178 /* Fall through to PASS packet */ 4179 4180 } else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY || 4181 ((*state)->pickup_mode == PF_PICKUPS_ENABLED && 4182 ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) != 4183 PFSTATE_GOT_SYN_MASK)) { 4184 /* 4185 * If pickup mode is hash only, do not fail on sequence checks. 4186 * 4187 * If pickup mode is enabled and we did not see the SYN in 4188 * both direction, do not fail on sequence checks because 4189 * we do not have complete information on window scale. 4190 * 4191 * Adjust expiration and fall through to PASS packet. 4192 * XXX Add a FIN check to reduce timeout? 4193 */ 4194 (*state)->expire = time_second; 4195 } else { 4196 /* 4197 * Failure processing 4198 */ 4199 if ((*state)->dst.state == TCPS_SYN_SENT && 4200 (*state)->src.state == TCPS_SYN_SENT) { 4201 /* Send RST for state mismatches during handshake */ 4202 if (!(th->th_flags & TH_RST)) 4203 pf_send_tcp((*state)->rule.ptr, pd->af, 4204 pd->dst, pd->src, th->th_dport, 4205 th->th_sport, ntohl(th->th_ack), 0, 4206 TH_RST, 0, 0, 4207 (*state)->rule.ptr->return_ttl, 1, 0, 4208 pd->eh, kif->pfik_ifp); 4209 src->seqlo = 0; 4210 src->seqhi = 1; 4211 src->max_win = 1; 4212 } else if (pf_status.debug >= PF_DEBUG_MISC) { 4213 kprintf("pf: BAD state: "); 4214 pf_print_state(*state); 4215 pf_print_flags(th->th_flags); 4216 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 4217 "pkts=%llu:%llu dir=%s,%s\n", 4218 seq, orig_seq, ack, pd->p_len, ackskew, 4219 (unsigned long long)(*state)->packets[0], 4220 (unsigned long long)(*state)->packets[1], 4221 pd->dir == PF_IN ? 
"in" : "out", 4222 pd->dir == (*state)->direction ? "fwd" : "rev"); 4223 kprintf("pf: State failure on: %c %c %c %c | %c %c\n", 4224 SEQ_GEQ(src->seqhi, end) ? ' ' : '1', 4225 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? 4226 ' ': '2', 4227 (ackskew >= -MAXACKWINDOW) ? ' ' : '3', 4228 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', 4229 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', 4230 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6'); 4231 } 4232 REASON_SET(reason, PFRES_BADSTATE); 4233 return (PF_DROP); 4234 } 4235 4236 return (PF_PASS); 4237 } 4238 4239 int 4240 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst, 4241 struct pf_state **state, struct pf_pdesc *pd, u_short *reason) 4242 { 4243 struct tcphdr *th = pd->hdr.tcp; 4244 4245 if (th->th_flags & TH_SYN) 4246 if (src->state < TCPS_SYN_SENT) 4247 src->state = TCPS_SYN_SENT; 4248 if (th->th_flags & TH_FIN) 4249 if (src->state < TCPS_CLOSING) 4250 src->state = TCPS_CLOSING; 4251 if (th->th_flags & TH_ACK) { 4252 if (dst->state == TCPS_SYN_SENT) { 4253 dst->state = TCPS_ESTABLISHED; 4254 if (src->state == TCPS_ESTABLISHED && 4255 (*state)->src_node != NULL && 4256 pf_src_connlimit(state)) { 4257 REASON_SET(reason, PFRES_SRCLIMIT); 4258 return (PF_DROP); 4259 } 4260 } else if (dst->state == TCPS_CLOSING) { 4261 dst->state = TCPS_FIN_WAIT_2; 4262 } else if (src->state == TCPS_SYN_SENT && 4263 dst->state < TCPS_SYN_SENT) { 4264 /* 4265 * Handle a special sloppy case where we only see one 4266 * half of the connection. If there is a ACK after 4267 * the initial SYN without ever seeing a packet from 4268 * the destination, set the connection to established. 4269 */ 4270 dst->state = src->state = TCPS_ESTABLISHED; 4271 if ((*state)->src_node != NULL && 4272 pf_src_connlimit(state)) { 4273 REASON_SET(reason, PFRES_SRCLIMIT); 4274 return (PF_DROP); 4275 } 4276 } else if (src->state == TCPS_CLOSING && 4277 dst->state == TCPS_ESTABLISHED && 4278 dst->seqlo == 0) { 4279 /* 4280 * Handle the closing of half connections where we 4281 * don't see the full bidirectional FIN/ACK+ACK 4282 * handshake. 
4283 */ 4284 dst->state = TCPS_CLOSING; 4285 } 4286 } 4287 if (th->th_flags & TH_RST) 4288 src->state = dst->state = TCPS_TIME_WAIT; 4289 4290 /* update expire time */ 4291 (*state)->expire = time_second; 4292 if (src->state >= TCPS_FIN_WAIT_2 && 4293 dst->state >= TCPS_FIN_WAIT_2) 4294 (*state)->timeout = PFTM_TCP_CLOSED; 4295 else if (src->state >= TCPS_CLOSING && 4296 dst->state >= TCPS_CLOSING) 4297 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4298 else if (src->state < TCPS_ESTABLISHED || 4299 dst->state < TCPS_ESTABLISHED) 4300 (*state)->timeout = PFTM_TCP_OPENING; 4301 else if (src->state >= TCPS_CLOSING || 4302 dst->state >= TCPS_CLOSING) 4303 (*state)->timeout = PFTM_TCP_CLOSING; 4304 else 4305 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4306 4307 return (PF_PASS); 4308 } 4309 4310 int 4311 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, 4312 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4313 u_short *reason) 4314 { 4315 struct pf_state_key_cmp key; 4316 struct tcphdr *th = pd->hdr.tcp; 4317 int copyback = 0; 4318 struct pf_state_peer *src, *dst; 4319 struct pf_state_key *sk; 4320 4321 key.af = pd->af; 4322 key.proto = IPPROTO_TCP; 4323 if (direction == PF_IN) { /* wire side, straight */ 4324 PF_ACPY(&key.addr[0], pd->src, key.af); 4325 PF_ACPY(&key.addr[1], pd->dst, key.af); 4326 key.port[0] = th->th_sport; 4327 key.port[1] = th->th_dport; 4328 } else { /* stack side, reverse */ 4329 PF_ACPY(&key.addr[1], pd->src, key.af); 4330 PF_ACPY(&key.addr[0], pd->dst, key.af); 4331 key.port[1] = th->th_sport; 4332 key.port[0] = th->th_dport; 4333 } 4334 4335 STATE_LOOKUP(kif, &key, direction, *state, m); 4336 4337 if (direction == (*state)->direction) { 4338 src = &(*state)->src; 4339 dst = &(*state)->dst; 4340 } else { 4341 src = &(*state)->dst; 4342 dst = &(*state)->src; 4343 } 4344 4345 sk = (*state)->key[pd->didx]; 4346 4347 if ((*state)->src.state == PF_TCPS_PROXY_SRC) { 4348 if (direction != (*state)->direction) { 4349 REASON_SET(reason, PFRES_SYNPROXY); 4350 return (PF_SYNPROXY_DROP); 4351 } 4352 if (th->th_flags & TH_SYN) { 4353 if (ntohl(th->th_seq) != (*state)->src.seqlo) { 4354 REASON_SET(reason, PFRES_SYNPROXY); 4355 return (PF_DROP); 4356 } 4357 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4358 pd->src, th->th_dport, th->th_sport, 4359 (*state)->src.seqhi, ntohl(th->th_seq) + 1, 4360 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 4361 0, NULL, NULL); 4362 REASON_SET(reason, PFRES_SYNPROXY); 4363 return (PF_SYNPROXY_DROP); 4364 } else if (!(th->th_flags & TH_ACK) || 4365 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4366 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4367 REASON_SET(reason, PFRES_SYNPROXY); 4368 return (PF_DROP); 4369 } else if ((*state)->src_node != NULL && 4370 pf_src_connlimit(state)) { 4371 REASON_SET(reason, PFRES_SRCLIMIT); 4372 return (PF_DROP); 4373 } else 4374 (*state)->src.state = PF_TCPS_PROXY_DST; 4375 } 4376 if ((*state)->src.state == PF_TCPS_PROXY_DST) { 4377 if (direction == (*state)->direction) { 4378 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || 4379 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4380 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4381 REASON_SET(reason, PFRES_SYNPROXY); 4382 return (PF_DROP); 4383 } 4384 (*state)->src.max_win = MAX(ntohs(th->th_win), 1); 4385 if ((*state)->dst.seqhi == 1) 4386 (*state)->dst.seqhi = htonl(karc4random()); 4387 pf_send_tcp((*state)->rule.ptr, pd->af, 4388 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4389 sk->port[pd->sidx], sk->port[pd->didx], 4390 
(*state)->dst.seqhi, 0, TH_SYN, 0, 4391 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL); 4392 REASON_SET(reason, PFRES_SYNPROXY); 4393 return (PF_SYNPROXY_DROP); 4394 } else if (((th->th_flags & (TH_SYN|TH_ACK)) != 4395 (TH_SYN|TH_ACK)) || 4396 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { 4397 REASON_SET(reason, PFRES_SYNPROXY); 4398 return (PF_DROP); 4399 } else { 4400 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); 4401 (*state)->dst.seqlo = ntohl(th->th_seq); 4402 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4403 pd->src, th->th_dport, th->th_sport, 4404 ntohl(th->th_ack), ntohl(th->th_seq) + 1, 4405 TH_ACK, (*state)->src.max_win, 0, 0, 0, 4406 (*state)->tag, NULL, NULL); 4407 pf_send_tcp((*state)->rule.ptr, pd->af, 4408 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4409 sk->port[pd->sidx], sk->port[pd->didx], 4410 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, 4411 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 4412 0, NULL, NULL); 4413 (*state)->src.seqdiff = (*state)->dst.seqhi - 4414 (*state)->src.seqlo; 4415 (*state)->dst.seqdiff = (*state)->src.seqhi - 4416 (*state)->dst.seqlo; 4417 (*state)->src.seqhi = (*state)->src.seqlo + 4418 (*state)->dst.max_win; 4419 (*state)->dst.seqhi = (*state)->dst.seqlo + 4420 (*state)->src.max_win; 4421 (*state)->src.wscale = (*state)->dst.wscale = 0; 4422 (*state)->src.state = (*state)->dst.state = 4423 TCPS_ESTABLISHED; 4424 REASON_SET(reason, PFRES_SYNPROXY); 4425 return (PF_SYNPROXY_DROP); 4426 } 4427 } 4428 4429 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) && 4430 dst->state >= TCPS_FIN_WAIT_2 && 4431 src->state >= TCPS_FIN_WAIT_2) { 4432 if (pf_status.debug >= PF_DEBUG_MISC) { 4433 kprintf("pf: state reuse "); 4434 pf_print_state(*state); 4435 pf_print_flags(th->th_flags); 4436 kprintf("\n"); 4437 } 4438 /* XXX make sure it's the same direction ?? */ 4439 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 4440 pf_unlink_state(*state); 4441 *state = NULL; 4442 return (PF_DROP); 4443 } 4444 4445 if ((*state)->state_flags & PFSTATE_SLOPPY) { 4446 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP) 4447 return (PF_DROP); 4448 } else { 4449 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason, 4450 &copyback) == PF_DROP) 4451 return (PF_DROP); 4452 } 4453 4454 /* translate source/destination address, if necessary */ 4455 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4456 struct pf_state_key *nk = (*state)->key[pd->didx]; 4457 4458 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4459 nk->port[pd->sidx] != th->th_sport) { 4460 /* 4461 * The translated source address may be completely 4462 * unrelated to the saved link header, make sure 4463 * a bridge doesn't try to use it. 4464 */ 4465 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4466 m->m_flags &= ~M_HASH; 4467 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 4468 &th->th_sum, &nk->addr[pd->sidx], 4469 nk->port[pd->sidx], 0, pd->af); 4470 } 4471 4472 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4473 nk->port[pd->didx] != th->th_dport) { 4474 /* 4475 * If we don't redispatch the packet will go into 4476 * the protocol stack on the wrong cpu for the 4477 * post-translated address.
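 *
 * A minimal sketch of the mechanism, assuming (as the rest of
 * this file does) that the stack re-hashes any mbuf whose
 * M_HASH flag is clear before dispatching it to a protocol
 * thread:
 *
 *	m->m_flags &= ~M_HASH;	force re-hash on the post-NAT tuple
 *	pf_change_ap(...);	rewrite addr/port, fix checksums
 *
 * so the packet lands on the cpu that owns the translated
 * connection, mirroring the source-rewrite case above.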
4478 */ 4479 m->m_flags &= ~M_HASH; 4480 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 4481 &th->th_sum, &nk->addr[pd->didx], 4482 nk->port[pd->didx], 0, pd->af); 4483 } 4484 copyback = 1; 4485 } 4486 4487 /* Copyback sequence modulation or stateful scrub changes if needed */ 4488 if (copyback) 4489 m_copyback(m, off, sizeof(*th), (caddr_t)th); 4490 4491 return (PF_PASS); 4492 } 4493 4494 int 4495 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, 4496 struct mbuf *m, int off, void *h, struct pf_pdesc *pd) 4497 { 4498 struct pf_state_peer *src, *dst; 4499 struct pf_state_key_cmp key; 4500 struct udphdr *uh = pd->hdr.udp; 4501 4502 key.af = pd->af; 4503 key.proto = IPPROTO_UDP; 4504 if (direction == PF_IN) { /* wire side, straight */ 4505 PF_ACPY(&key.addr[0], pd->src, key.af); 4506 PF_ACPY(&key.addr[1], pd->dst, key.af); 4507 key.port[0] = uh->uh_sport; 4508 key.port[1] = uh->uh_dport; 4509 } else { /* stack side, reverse */ 4510 PF_ACPY(&key.addr[1], pd->src, key.af); 4511 PF_ACPY(&key.addr[0], pd->dst, key.af); 4512 key.port[1] = uh->uh_sport; 4513 key.port[0] = uh->uh_dport; 4514 } 4515 4516 STATE_LOOKUP(kif, &key, direction, *state, m); 4517 4518 if (direction == (*state)->direction) { 4519 src = &(*state)->src; 4520 dst = &(*state)->dst; 4521 } else { 4522 src = &(*state)->dst; 4523 dst = &(*state)->src; 4524 } 4525 4526 /* update states */ 4527 if (src->state < PFUDPS_SINGLE) 4528 src->state = PFUDPS_SINGLE; 4529 if (dst->state == PFUDPS_SINGLE) 4530 dst->state = PFUDPS_MULTIPLE; 4531 4532 /* update expire time */ 4533 (*state)->expire = time_second; 4534 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) 4535 (*state)->timeout = PFTM_UDP_MULTIPLE; 4536 else 4537 (*state)->timeout = PFTM_UDP_SINGLE; 4538 4539 /* translate source/destination address, if necessary */ 4540 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4541 struct pf_state_key *nk = (*state)->key[pd->didx]; 4542 4543 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4544 nk->port[pd->sidx] != uh->uh_sport) { 4545 /* 4546 * The translated source address may be completely 4547 * unrelated to the saved link header, make sure 4548 * a bridge doesn't try to use it. 4549 */ 4550 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4551 m->m_flags &= ~M_HASH; 4552 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 4553 &uh->uh_sum, &nk->addr[pd->sidx], 4554 nk->port[pd->sidx], 1, pd->af); 4555 } 4556 4557 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4558 nk->port[pd->didx] != uh->uh_dport) { 4559 /* 4560 * If we don't redispatch the packet will go into 4561 * the protocol stack on the wrong cpu for the 4562 * post-translated address. 
4563 */ 4564 m->m_flags &= ~M_HASH; 4565 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 4566 &uh->uh_sum, &nk->addr[pd->didx], 4567 nk->port[pd->didx], 1, pd->af); 4568 } 4569 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 4570 } 4571 4572 return (PF_PASS); 4573 } 4574 4575 int 4576 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, 4577 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason) 4578 { 4579 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 4580 u_int16_t icmpid = 0, *icmpsum; 4581 u_int8_t icmptype; 4582 int state_icmp = 0; 4583 struct pf_state_key_cmp key; 4584 4585 switch (pd->proto) { 4586 #ifdef INET 4587 case IPPROTO_ICMP: 4588 icmptype = pd->hdr.icmp->icmp_type; 4589 icmpid = pd->hdr.icmp->icmp_id; 4590 icmpsum = &pd->hdr.icmp->icmp_cksum; 4591 4592 if (icmptype == ICMP_UNREACH || 4593 icmptype == ICMP_SOURCEQUENCH || 4594 icmptype == ICMP_REDIRECT || 4595 icmptype == ICMP_TIMXCEED || 4596 icmptype == ICMP_PARAMPROB) 4597 state_icmp++; 4598 break; 4599 #endif /* INET */ 4600 #ifdef INET6 4601 case IPPROTO_ICMPV6: 4602 icmptype = pd->hdr.icmp6->icmp6_type; 4603 icmpid = pd->hdr.icmp6->icmp6_id; 4604 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 4605 4606 if (icmptype == ICMP6_DST_UNREACH || 4607 icmptype == ICMP6_PACKET_TOO_BIG || 4608 icmptype == ICMP6_TIME_EXCEEDED || 4609 icmptype == ICMP6_PARAM_PROB) 4610 state_icmp++; 4611 break; 4612 #endif /* INET6 */ 4613 } 4614 4615 if (!state_icmp) { 4616 4617 /* 4618 * ICMP query/reply message not related to a TCP/UDP packet. 4619 * Search for an ICMP state. 4620 */ 4621 key.af = pd->af; 4622 key.proto = pd->proto; 4623 key.port[0] = key.port[1] = icmpid; 4624 if (direction == PF_IN) { /* wire side, straight */ 4625 PF_ACPY(&key.addr[0], pd->src, key.af); 4626 PF_ACPY(&key.addr[1], pd->dst, key.af); 4627 } else { /* stack side, reverse */ 4628 PF_ACPY(&key.addr[1], pd->src, key.af); 4629 PF_ACPY(&key.addr[0], pd->dst, key.af); 4630 } 4631 4632 STATE_LOOKUP(kif, &key, direction, *state, m); 4633 4634 (*state)->expire = time_second; 4635 (*state)->timeout = PFTM_ICMP_ERROR_REPLY; 4636 4637 /* translate source/destination address, if necessary */ 4638 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4639 struct pf_state_key *nk = (*state)->key[pd->didx]; 4640 4641 switch (pd->af) { 4642 #ifdef INET 4643 case AF_INET: 4644 if (PF_ANEQ(pd->src, 4645 &nk->addr[pd->sidx], AF_INET)) 4646 pf_change_a(&saddr->v4.s_addr, 4647 pd->ip_sum, 4648 nk->addr[pd->sidx].v4.s_addr, 0); 4649 4650 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], 4651 AF_INET)) 4652 pf_change_a(&daddr->v4.s_addr, 4653 pd->ip_sum, 4654 nk->addr[pd->didx].v4.s_addr, 0); 4655 4656 if (nk->port[0] != 4657 pd->hdr.icmp->icmp_id) { 4658 pd->hdr.icmp->icmp_cksum = 4659 pf_cksum_fixup( 4660 pd->hdr.icmp->icmp_cksum, icmpid, 4661 nk->port[pd->sidx], 0); 4662 pd->hdr.icmp->icmp_id = 4663 nk->port[pd->sidx]; 4664 } 4665 4666 m_copyback(m, off, ICMP_MINLEN, 4667 (caddr_t)pd->hdr.icmp); 4668 break; 4669 #endif /* INET */ 4670 #ifdef INET6 4671 case AF_INET6: 4672 if (PF_ANEQ(pd->src, 4673 &nk->addr[pd->sidx], AF_INET6)) 4674 pf_change_a6(saddr, 4675 &pd->hdr.icmp6->icmp6_cksum, 4676 &nk->addr[pd->sidx], 0); 4677 4678 if (PF_ANEQ(pd->dst, 4679 &nk->addr[pd->didx], AF_INET6)) 4680 pf_change_a6(daddr, 4681 &pd->hdr.icmp6->icmp6_cksum, 4682 &nk->addr[pd->didx], 0); 4683 4684 m_copyback(m, off, 4685 sizeof(struct icmp6_hdr), 4686 (caddr_t)pd->hdr.icmp6); 4687 break; 4688 #endif /* INET6 */ 4689 } 4690 } 4691 return (PF_PASS); 4692 4693 } 
else { 4694 /* 4695 * ICMP error message in response to a TCP/UDP packet. 4696 * Extract the inner TCP/UDP header and search for that state. 4697 */ 4698 4699 struct pf_pdesc pd2; 4700 #ifdef INET 4701 struct ip h2; 4702 #endif /* INET */ 4703 #ifdef INET6 4704 struct ip6_hdr h2_6; 4705 int terminal = 0; 4706 #endif /* INET6 */ 4707 int ipoff2; 4708 int off2; 4709 4710 pd2.af = pd->af; 4711 /* Payload packet is from the opposite direction. */ 4712 pd2.sidx = (direction == PF_IN) ? 1 : 0; 4713 pd2.didx = (direction == PF_IN) ? 0 : 1; 4714 switch (pd->af) { 4715 #ifdef INET 4716 case AF_INET: 4717 /* offset of h2 in mbuf chain */ 4718 ipoff2 = off + ICMP_MINLEN; 4719 4720 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 4721 NULL, reason, pd2.af)) { 4722 DPFPRINTF(PF_DEBUG_MISC, 4723 ("pf: ICMP error message too short " 4724 "(ip)\n")); 4725 return (PF_DROP); 4726 } 4727 /* 4728 * ICMP error messages don't refer to non-first 4729 * fragments 4730 */ 4731 if (h2.ip_off & htons(IP_OFFMASK)) { 4732 REASON_SET(reason, PFRES_FRAG); 4733 return (PF_DROP); 4734 } 4735 4736 /* offset of protocol header that follows h2 */ 4737 off2 = ipoff2 + (h2.ip_hl << 2); 4738 4739 pd2.proto = h2.ip_p; 4740 pd2.src = (struct pf_addr *)&h2.ip_src; 4741 pd2.dst = (struct pf_addr *)&h2.ip_dst; 4742 pd2.ip_sum = &h2.ip_sum; 4743 break; 4744 #endif /* INET */ 4745 #ifdef INET6 4746 case AF_INET6: 4747 ipoff2 = off + sizeof(struct icmp6_hdr); 4748 4749 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 4750 NULL, reason, pd2.af)) { 4751 DPFPRINTF(PF_DEBUG_MISC, 4752 ("pf: ICMP error message too short " 4753 "(ip6)\n")); 4754 return (PF_DROP); 4755 } 4756 pd2.proto = h2_6.ip6_nxt; 4757 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 4758 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 4759 pd2.ip_sum = NULL; 4760 off2 = ipoff2 + sizeof(h2_6); 4761 do { 4762 switch (pd2.proto) { 4763 case IPPROTO_FRAGMENT: 4764 /* 4765 * ICMPv6 error messages for 4766 * non-first fragments 4767 */ 4768 REASON_SET(reason, PFRES_FRAG); 4769 return (PF_DROP); 4770 case IPPROTO_AH: 4771 case IPPROTO_HOPOPTS: 4772 case IPPROTO_ROUTING: 4773 case IPPROTO_DSTOPTS: { 4774 /* get next header and header length */ 4775 struct ip6_ext opt6; 4776 4777 if (!pf_pull_hdr(m, off2, &opt6, 4778 sizeof(opt6), NULL, reason, 4779 pd2.af)) { 4780 DPFPRINTF(PF_DEBUG_MISC, 4781 ("pf: ICMPv6 short opt\n")); 4782 return (PF_DROP); 4783 } 4784 if (pd2.proto == IPPROTO_AH) 4785 off2 += (opt6.ip6e_len + 2) * 4; 4786 else 4787 off2 += (opt6.ip6e_len + 1) * 8; 4788 pd2.proto = opt6.ip6e_nxt; 4789 /* goto the next header */ 4790 break; 4791 } 4792 default: 4793 terminal++; 4794 break; 4795 } 4796 } while (!terminal); 4797 break; 4798 #endif /* INET6 */ 4799 default: 4800 DPFPRINTF(PF_DEBUG_MISC, 4801 ("pf: ICMP AF %d unknown (ip6)\n", pd->af)); 4802 return (PF_DROP); 4803 break; 4804 } 4805 4806 switch (pd2.proto) { 4807 case IPPROTO_TCP: { 4808 struct tcphdr th; 4809 u_int32_t seq; 4810 struct pf_state_peer *src, *dst; 4811 u_int8_t dws; 4812 int copyback = 0; 4813 4814 /* 4815 * Only the first 8 bytes of the TCP header can be 4816 * expected. Don't access any TCP header fields after 4817 * th_seq; an ackskew test is not possible.
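 *
 * (RFC 792 only requires an ICMP error to quote the offending
 * IP header plus the first 64 bits -- 8 octets -- of its
 * payload; for TCP that covers th_sport, th_dport and th_seq
 * but not th_ack, hence the 8-byte pf_pull_hdr() below.)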
4818 */ 4819 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, 4820 pd2.af)) { 4821 DPFPRINTF(PF_DEBUG_MISC, 4822 ("pf: ICMP error message too short " 4823 "(tcp)\n")); 4824 return (PF_DROP); 4825 } 4826 4827 key.af = pd2.af; 4828 key.proto = IPPROTO_TCP; 4829 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4830 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4831 key.port[pd2.sidx] = th.th_sport; 4832 key.port[pd2.didx] = th.th_dport; 4833 4834 STATE_LOOKUP(kif, &key, direction, *state, m); 4835 4836 if (direction == (*state)->direction) { 4837 src = &(*state)->dst; 4838 dst = &(*state)->src; 4839 } else { 4840 src = &(*state)->src; 4841 dst = &(*state)->dst; 4842 } 4843 4844 if (src->wscale && dst->wscale) 4845 dws = dst->wscale & PF_WSCALE_MASK; 4846 else 4847 dws = 0; 4848 4849 /* Demodulate sequence number */ 4850 seq = ntohl(th.th_seq) - src->seqdiff; 4851 if (src->seqdiff) { 4852 pf_change_a(&th.th_seq, icmpsum, 4853 htonl(seq), 0); 4854 copyback = 1; 4855 } 4856 4857 if (!((*state)->state_flags & PFSTATE_SLOPPY) && 4858 (!SEQ_GEQ(src->seqhi, seq) || 4859 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { 4860 if (pf_status.debug >= PF_DEBUG_MISC) { 4861 kprintf("pf: BAD ICMP %d:%d ", 4862 icmptype, pd->hdr.icmp->icmp_code); 4863 pf_print_host(pd->src, 0, pd->af); 4864 kprintf(" -> "); 4865 pf_print_host(pd->dst, 0, pd->af); 4866 kprintf(" state: "); 4867 pf_print_state(*state); 4868 kprintf(" seq=%u\n", seq); 4869 } 4870 REASON_SET(reason, PFRES_BADSTATE); 4871 return (PF_DROP); 4872 } else { 4873 if (pf_status.debug >= PF_DEBUG_MISC) { 4874 kprintf("pf: OK ICMP %d:%d ", 4875 icmptype, pd->hdr.icmp->icmp_code); 4876 pf_print_host(pd->src, 0, pd->af); 4877 kprintf(" -> "); 4878 pf_print_host(pd->dst, 0, pd->af); 4879 kprintf(" state: "); 4880 pf_print_state(*state); 4881 kprintf(" seq=%u\n", seq); 4882 } 4883 } 4884 4885 /* translate source/destination address, if necessary */ 4886 if ((*state)->key[PF_SK_WIRE] != 4887 (*state)->key[PF_SK_STACK]) { 4888 struct pf_state_key *nk = 4889 (*state)->key[pd->didx]; 4890 4891 if (PF_ANEQ(pd2.src, 4892 &nk->addr[pd2.sidx], pd2.af) || 4893 nk->port[pd2.sidx] != th.th_sport) 4894 pf_change_icmp(pd2.src, &th.th_sport, 4895 daddr, &nk->addr[pd2.sidx], 4896 nk->port[pd2.sidx], NULL, 4897 pd2.ip_sum, icmpsum, 4898 pd->ip_sum, 0, pd2.af); 4899 4900 if (PF_ANEQ(pd2.dst, 4901 &nk->addr[pd2.didx], pd2.af) || 4902 nk->port[pd2.didx] != th.th_dport) 4903 pf_change_icmp(pd2.dst, &th.th_dport, 4904 NULL, /* XXX Inbound NAT? 
*/ 4905 &nk->addr[pd2.didx], 4906 nk->port[pd2.didx], NULL, 4907 pd2.ip_sum, icmpsum, 4908 pd->ip_sum, 0, pd2.af); 4909 copyback = 1; 4910 } 4911 4912 if (copyback) { 4913 switch (pd2.af) { 4914 #ifdef INET 4915 case AF_INET: 4916 m_copyback(m, off, ICMP_MINLEN, 4917 (caddr_t)pd->hdr.icmp); 4918 m_copyback(m, ipoff2, sizeof(h2), 4919 (caddr_t)&h2); 4920 break; 4921 #endif /* INET */ 4922 #ifdef INET6 4923 case AF_INET6: 4924 m_copyback(m, off, 4925 sizeof(struct icmp6_hdr), 4926 (caddr_t)pd->hdr.icmp6); 4927 m_copyback(m, ipoff2, sizeof(h2_6), 4928 (caddr_t)&h2_6); 4929 break; 4930 #endif /* INET6 */ 4931 } 4932 m_copyback(m, off2, 8, (caddr_t)&th); 4933 } 4934 4935 return (PF_PASS); 4936 break; 4937 } 4938 case IPPROTO_UDP: { 4939 struct udphdr uh; 4940 4941 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 4942 NULL, reason, pd2.af)) { 4943 DPFPRINTF(PF_DEBUG_MISC, 4944 ("pf: ICMP error message too short " 4945 "(udp)\n")); 4946 return (PF_DROP); 4947 } 4948 4949 key.af = pd2.af; 4950 key.proto = IPPROTO_UDP; 4951 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4952 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4953 key.port[pd2.sidx] = uh.uh_sport; 4954 key.port[pd2.didx] = uh.uh_dport; 4955 4956 STATE_LOOKUP(kif, &key, direction, *state, m); 4957 4958 /* translate source/destination address, if necessary */ 4959 if ((*state)->key[PF_SK_WIRE] != 4960 (*state)->key[PF_SK_STACK]) { 4961 struct pf_state_key *nk = 4962 (*state)->key[pd->didx]; 4963 4964 if (PF_ANEQ(pd2.src, 4965 &nk->addr[pd2.sidx], pd2.af) || 4966 nk->port[pd2.sidx] != uh.uh_sport) 4967 pf_change_icmp(pd2.src, &uh.uh_sport, 4968 daddr, &nk->addr[pd2.sidx], 4969 nk->port[pd2.sidx], &uh.uh_sum, 4970 pd2.ip_sum, icmpsum, 4971 pd->ip_sum, 1, pd2.af); 4972 4973 if (PF_ANEQ(pd2.dst, 4974 &nk->addr[pd2.didx], pd2.af) || 4975 nk->port[pd2.didx] != uh.uh_dport) 4976 pf_change_icmp(pd2.dst, &uh.uh_dport, 4977 NULL, /* XXX Inbound NAT? 
*/ 4978 &nk->addr[pd2.didx], 4979 nk->port[pd2.didx], &uh.uh_sum, 4980 pd2.ip_sum, icmpsum, 4981 pd->ip_sum, 1, pd2.af); 4982 4983 switch (pd2.af) { 4984 #ifdef INET 4985 case AF_INET: 4986 m_copyback(m, off, ICMP_MINLEN, 4987 (caddr_t)pd->hdr.icmp); 4988 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4989 break; 4990 #endif /* INET */ 4991 #ifdef INET6 4992 case AF_INET6: 4993 m_copyback(m, off, 4994 sizeof(struct icmp6_hdr), 4995 (caddr_t)pd->hdr.icmp6); 4996 m_copyback(m, ipoff2, sizeof(h2_6), 4997 (caddr_t)&h2_6); 4998 break; 4999 #endif /* INET6 */ 5000 } 5001 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh); 5002 } 5003 5004 return (PF_PASS); 5005 break; 5006 } 5007 #ifdef INET 5008 case IPPROTO_ICMP: { 5009 struct icmp iih; 5010 5011 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 5012 NULL, reason, pd2.af)) { 5013 DPFPRINTF(PF_DEBUG_MISC, 5014 ("pf: ICMP error message too short " 5015 "(icmp)\n")); 5016 return (PF_DROP); 5017 } 5018 5019 key.af = pd2.af; 5020 key.proto = IPPROTO_ICMP; 5021 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5022 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5023 key.port[0] = key.port[1] = iih.icmp_id; 5024 5025 STATE_LOOKUP(kif, &key, direction, *state, m); 5026 5027 /* translate source/destination address, if necessary */ 5028 if ((*state)->key[PF_SK_WIRE] != 5029 (*state)->key[PF_SK_STACK]) { 5030 struct pf_state_key *nk = 5031 (*state)->key[pd->didx]; 5032 5033 if (PF_ANEQ(pd2.src, 5034 &nk->addr[pd2.sidx], pd2.af) || 5035 nk->port[pd2.sidx] != iih.icmp_id) 5036 pf_change_icmp(pd2.src, &iih.icmp_id, 5037 daddr, &nk->addr[pd2.sidx], 5038 nk->port[pd2.sidx], NULL, 5039 pd2.ip_sum, icmpsum, 5040 pd->ip_sum, 0, AF_INET); 5041 5042 if (PF_ANEQ(pd2.dst, 5043 &nk->addr[pd2.didx], pd2.af) || 5044 nk->port[pd2.didx] != iih.icmp_id) 5045 pf_change_icmp(pd2.dst, &iih.icmp_id, 5046 NULL, /* XXX Inbound NAT? */ 5047 &nk->addr[pd2.didx], 5048 nk->port[pd2.didx], NULL, 5049 pd2.ip_sum, icmpsum, 5050 pd->ip_sum, 0, AF_INET); 5051 5052 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 5053 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5054 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih); 5055 } 5056 return (PF_PASS); 5057 break; 5058 } 5059 #endif /* INET */ 5060 #ifdef INET6 5061 case IPPROTO_ICMPV6: { 5062 struct icmp6_hdr iih; 5063 5064 if (!pf_pull_hdr(m, off2, &iih, 5065 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { 5066 DPFPRINTF(PF_DEBUG_MISC, 5067 ("pf: ICMP error message too short " 5068 "(icmp6)\n")); 5069 return (PF_DROP); 5070 } 5071 5072 key.af = pd2.af; 5073 key.proto = IPPROTO_ICMPV6; 5074 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5075 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5076 key.port[0] = key.port[1] = iih.icmp6_id; 5077 5078 STATE_LOOKUP(kif, &key, direction, *state, m); 5079 5080 /* translate source/destination address, if necessary */ 5081 if ((*state)->key[PF_SK_WIRE] != 5082 (*state)->key[PF_SK_STACK]) { 5083 struct pf_state_key *nk = 5084 (*state)->key[pd->didx]; 5085 5086 if (PF_ANEQ(pd2.src, 5087 &nk->addr[pd2.sidx], pd2.af) || 5088 nk->port[pd2.sidx] != iih.icmp6_id) 5089 pf_change_icmp(pd2.src, &iih.icmp6_id, 5090 daddr, &nk->addr[pd2.sidx], 5091 nk->port[pd2.sidx], NULL, 5092 pd2.ip_sum, icmpsum, 5093 pd->ip_sum, 0, AF_INET6); 5094 5095 if (PF_ANEQ(pd2.dst, 5096 &nk->addr[pd2.didx], pd2.af) || 5097 nk->port[pd2.didx] != iih.icmp6_id) 5098 pf_change_icmp(pd2.dst, &iih.icmp6_id, 5099 NULL, /* XXX Inbound NAT?
*/ 5100 &nk->addr[pd2.didx], 5101 nk->port[pd2.didx], NULL, 5102 pd2.ip_sum, icmpsum, 5103 pd->ip_sum, 0, AF_INET6); 5104 5105 m_copyback(m, off, sizeof(struct icmp6_hdr), 5106 (caddr_t)pd->hdr.icmp6); 5107 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); 5108 m_copyback(m, off2, sizeof(struct icmp6_hdr), 5109 (caddr_t)&iih); 5110 } 5111 5112 return (PF_PASS); 5113 break; 5114 } 5115 #endif /* INET6 */ 5116 default: { 5117 key.af = pd2.af; 5118 key.proto = pd2.proto; 5119 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5120 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5121 key.port[0] = key.port[1] = 0; 5122 5123 STATE_LOOKUP(kif, &key, direction, *state, m); 5124 5125 /* translate source/destination address, if necessary */ 5126 if ((*state)->key[PF_SK_WIRE] != 5127 (*state)->key[PF_SK_STACK]) { 5128 struct pf_state_key *nk = 5129 (*state)->key[pd->didx]; 5130 5131 if (PF_ANEQ(pd2.src, 5132 &nk->addr[pd2.sidx], pd2.af)) 5133 pf_change_icmp(pd2.src, NULL, daddr, 5134 &nk->addr[pd2.sidx], 0, NULL, 5135 pd2.ip_sum, icmpsum, 5136 pd->ip_sum, 0, pd2.af); 5137 5138 if (PF_ANEQ(pd2.dst, 5139 &nk->addr[pd2.didx], pd2.af)) 5140 pf_change_icmp(pd2.dst, NULL, 5141 NULL, /* XXX Inbound NAT? */ 5142 &nk->addr[pd2.didx], 0, NULL, 5143 pd2.ip_sum, icmpsum, 5144 pd->ip_sum, 0, pd2.af); 5145 5146 switch (pd2.af) { 5147 #ifdef INET 5148 case AF_INET: 5149 m_copyback(m, off, ICMP_MINLEN, 5150 (caddr_t)pd->hdr.icmp); 5151 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5152 break; 5153 #endif /* INET */ 5154 #ifdef INET6 5155 case AF_INET6: 5156 m_copyback(m, off, 5157 sizeof(struct icmp6_hdr), 5158 (caddr_t)pd->hdr.icmp6); 5159 m_copyback(m, ipoff2, sizeof(h2_6), 5160 (caddr_t)&h2_6); 5161 break; 5162 #endif /* INET6 */ 5163 } 5164 } 5165 return (PF_PASS); 5166 break; 5167 } 5168 } 5169 } 5170 } 5171 5172 int 5173 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 5174 struct mbuf *m, struct pf_pdesc *pd) 5175 { 5176 struct pf_state_peer *src, *dst; 5177 struct pf_state_key_cmp key; 5178 5179 key.af = pd->af; 5180 key.proto = pd->proto; 5181 if (direction == PF_IN) { 5182 PF_ACPY(&key.addr[0], pd->src, key.af); 5183 PF_ACPY(&key.addr[1], pd->dst, key.af); 5184 key.port[0] = key.port[1] = 0; 5185 } else { 5186 PF_ACPY(&key.addr[1], pd->src, key.af); 5187 PF_ACPY(&key.addr[0], pd->dst, key.af); 5188 key.port[1] = key.port[0] = 0; 5189 } 5190 5191 STATE_LOOKUP(kif, &key, direction, *state, m); 5192 5193 if (direction == (*state)->direction) { 5194 src = &(*state)->src; 5195 dst = &(*state)->dst; 5196 } else { 5197 src = &(*state)->dst; 5198 dst = &(*state)->src; 5199 } 5200 5201 /* update states */ 5202 if (src->state < PFOTHERS_SINGLE) 5203 src->state = PFOTHERS_SINGLE; 5204 if (dst->state == PFOTHERS_SINGLE) 5205 dst->state = PFOTHERS_MULTIPLE; 5206 5207 /* update expire time */ 5208 (*state)->expire = time_second; 5209 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) 5210 (*state)->timeout = PFTM_OTHER_MULTIPLE; 5211 else 5212 (*state)->timeout = PFTM_OTHER_SINGLE; 5213 5214 /* translate source/destination address, if necessary */ 5215 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 5216 struct pf_state_key *nk = (*state)->key[pd->didx]; 5217 5218 KKASSERT(nk); 5219 KKASSERT(pd); 5220 KKASSERT(pd->src); 5221 KKASSERT(pd->dst); 5222 switch (pd->af) { 5223 #ifdef INET 5224 case AF_INET: 5225 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) 5226 pf_change_a(&pd->src->v4.s_addr, 5227 pd->ip_sum, 5228 nk->addr[pd->sidx].v4.s_addr, 5229
0); 5230 5231 5232 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) 5233 pf_change_a(&pd->dst->v4.s_addr, 5234 pd->ip_sum, 5235 nk->addr[pd->didx].v4.s_addr, 5236 0); 5237 5238 break; 5239 #endif /* INET */ 5240 #ifdef INET6 5241 case AF_INET6: 5242 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6)) 5243 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); 5244 5245 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6)) 5246 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); 5247 #endif /* INET6 */ 5248 } 5249 } 5250 return (PF_PASS); 5251 } 5252 5253 /* 5254 * ipoff and off are measured from the start of the mbuf chain. 5255 * h must be at "ipoff" on the mbuf chain. 5256 */ 5257 void * 5258 pf_pull_hdr(struct mbuf *m, int off, void *p, int len, 5259 u_short *actionp, u_short *reasonp, sa_family_t af) 5260 { 5261 switch (af) { 5262 #ifdef INET 5263 case AF_INET: { 5264 struct ip *h = mtod(m, struct ip *); 5265 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3; 5266 5267 if (fragoff) { 5268 if (fragoff >= len) 5269 ACTION_SET(actionp, PF_PASS); 5270 else { 5271 ACTION_SET(actionp, PF_DROP); 5272 REASON_SET(reasonp, PFRES_FRAG); 5273 } 5274 return (NULL); 5275 } 5276 if (m->m_pkthdr.len < off + len || 5277 h->ip_len < off + len) { 5278 ACTION_SET(actionp, PF_DROP); 5279 REASON_SET(reasonp, PFRES_SHORT); 5280 return (NULL); 5281 } 5282 break; 5283 } 5284 #endif /* INET */ 5285 #ifdef INET6 5286 case AF_INET6: { 5287 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 5288 5289 if (m->m_pkthdr.len < off + len || 5290 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < 5291 (unsigned)(off + len)) { 5292 ACTION_SET(actionp, PF_DROP); 5293 REASON_SET(reasonp, PFRES_SHORT); 5294 return (NULL); 5295 } 5296 break; 5297 } 5298 #endif /* INET6 */ 5299 } 5300 m_copydata(m, off, len, p); 5301 return (p); 5302 } 5303 5304 int 5305 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif) 5306 { 5307 struct sockaddr_in *dst; 5308 int ret = 1; 5309 int check_mpath; 5310 #ifdef INET6 5311 struct sockaddr_in6 *dst6; 5312 struct route_in6 ro; 5313 #else 5314 struct route ro; 5315 #endif 5316 struct radix_node *rn; 5317 struct rtentry *rt; 5318 struct ifnet *ifp; 5319 5320 check_mpath = 0; 5321 bzero(&ro, sizeof(ro)); 5322 switch (af) { 5323 case AF_INET: 5324 dst = satosin(&ro.ro_dst); 5325 dst->sin_family = AF_INET; 5326 dst->sin_len = sizeof(*dst); 5327 dst->sin_addr = addr->v4; 5328 break; 5329 #ifdef INET6 5330 case AF_INET6: 5331 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5332 dst6->sin6_family = AF_INET6; 5333 dst6->sin6_len = sizeof(*dst6); 5334 dst6->sin6_addr = addr->v6; 5335 break; 5336 #endif /* INET6 */ 5337 default: 5338 return (0); 5339 } 5340 5341 /* Skip checks for ipsec interfaces */ 5342 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) 5343 goto out; 5344 5345 rtalloc_ign((struct route *)&ro, 0); 5346 5347 if (ro.ro_rt != NULL) { 5348 /* No interface given, this is a no-route check */ 5349 if (kif == NULL) 5350 goto out; 5351 5352 if (kif->pfik_ifp == NULL) { 5353 ret = 0; 5354 goto out; 5355 } 5356 5357 /* Perform uRPF check if passed input interface */ 5358 ret = 0; 5359 rn = (struct radix_node *)ro.ro_rt; 5360 do { 5361 rt = (struct rtentry *)rn; 5362 ifp = rt->rt_ifp; 5363 5364 if (kif->pfik_ifp == ifp) 5365 ret = 1; 5366 rn = NULL; 5367 } while (check_mpath == 1 && rn != NULL && ret == 0); 5368 } else 5369 ret = 0; 5370 out: 5371 if (ro.ro_rt != NULL) 5372 RTFREE(ro.ro_rt); 5373 return (ret); 5374 } 5375 5376 int 5377 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct
pf_addr_wrap *aw) 5378 { 5379 struct sockaddr_in *dst; 5380 #ifdef INET6 5381 struct sockaddr_in6 *dst6; 5382 struct route_in6 ro; 5383 #else 5384 struct route ro; 5385 #endif 5386 int ret = 0; 5387 5388 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5389 5390 bzero(&ro, sizeof(ro)); 5391 switch (af) { 5392 case AF_INET: 5393 dst = satosin(&ro.ro_dst); 5394 dst->sin_family = AF_INET; 5395 dst->sin_len = sizeof(*dst); 5396 dst->sin_addr = addr->v4; 5397 break; 5398 #ifdef INET6 5399 case AF_INET6: 5400 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5401 dst6->sin6_family = AF_INET6; 5402 dst6->sin6_len = sizeof(*dst6); 5403 dst6->sin6_addr = addr->v6; 5404 break; 5405 #endif /* INET6 */ 5406 default: 5407 return (0); 5408 } 5409 5410 rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING)); 5411 5412 if (ro.ro_rt != NULL) { 5413 RTFREE(ro.ro_rt); 5414 } 5415 5416 return (ret); 5417 } 5418 5419 #ifdef INET 5420 void 5421 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5422 struct pf_state *s, struct pf_pdesc *pd) 5423 { 5424 struct mbuf *m0, *m1; 5425 struct route iproute; 5426 struct route *ro = NULL; 5427 struct sockaddr_in *dst; 5428 struct ip *ip; 5429 struct ifnet *ifp = NULL; 5430 struct pf_addr naddr; 5431 struct pf_src_node *sn = NULL; 5432 int error = 0; 5433 int sw_csum; 5434 #ifdef IPSEC 5435 struct m_tag *mtag; 5436 #endif /* IPSEC */ 5437 5438 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5439 5440 if (m == NULL || *m == NULL || r == NULL || 5441 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5442 panic("pf_route: invalid parameters"); 5443 5444 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5445 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5446 (*m)->m_pkthdr.pf.routed = 1; 5447 } else { 5448 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5449 m0 = *m; 5450 *m = NULL; 5451 goto bad; 5452 } 5453 } 5454 5455 if (r->rt == PF_DUPTO) { 5456 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) { 5457 return; 5458 } 5459 } else { 5460 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5461 return; 5462 } 5463 m0 = *m; 5464 } 5465 5466 if (m0->m_len < sizeof(struct ip)) { 5467 DPFPRINTF(PF_DEBUG_URGENT, 5468 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5469 goto bad; 5470 } 5471 5472 ip = mtod(m0, struct ip *); 5473 5474 ro = &iproute; 5475 bzero((caddr_t)ro, sizeof(*ro)); 5476 dst = satosin(&ro->ro_dst); 5477 dst->sin_family = AF_INET; 5478 dst->sin_len = sizeof(*dst); 5479 dst->sin_addr = ip->ip_dst; 5480 5481 if (r->rt == PF_FASTROUTE) { 5482 rtalloc(ro); 5483 if (ro->ro_rt == 0) { 5484 ipstat.ips_noroute++; 5485 goto bad; 5486 } 5487 5488 ifp = ro->ro_rt->rt_ifp; 5489 ro->ro_rt->rt_use++; 5490 5491 if (ro->ro_rt->rt_flags & RTF_GATEWAY) 5492 dst = satosin(ro->ro_rt->rt_gateway); 5493 } else { 5494 if (TAILQ_EMPTY(&r->rpool.list)) { 5495 DPFPRINTF(PF_DEBUG_URGENT, 5496 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n")); 5497 goto bad; 5498 } 5499 if (s == NULL) { 5500 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, 5501 &naddr, NULL, &sn); 5502 if (!PF_AZERO(&naddr, AF_INET)) 5503 dst->sin_addr.s_addr = naddr.v4.s_addr; 5504 ifp = r->rpool.cur->kif ? 5505 r->rpool.cur->kif->pfik_ifp : NULL; 5506 } else { 5507 if (!PF_AZERO(&s->rt_addr, AF_INET)) 5508 dst->sin_addr.s_addr = 5509 s->rt_addr.v4.s_addr; 5510 ifp = s->rt_kif ? 
s->rt_kif->pfik_ifp : NULL; 5511 } 5512 } 5513 if (ifp == NULL) 5514 goto bad; 5515 5516 if (oifp != ifp) { 5517 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5518 goto bad; 5519 } else if (m0 == NULL) { 5520 goto done; 5521 } 5522 if (m0->m_len < sizeof(struct ip)) { 5523 DPFPRINTF(PF_DEBUG_URGENT, 5524 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5525 goto bad; 5526 } 5527 ip = mtod(m0, struct ip *); 5528 } 5529 5530 /* Copied from FreeBSD 5.1-CURRENT ip_output. */ 5531 m0->m_pkthdr.csum_flags |= CSUM_IP; 5532 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist; 5533 if (sw_csum & CSUM_DELAY_DATA) { 5534 in_delayed_cksum(m0); 5535 sw_csum &= ~CSUM_DELAY_DATA; 5536 } 5537 m0->m_pkthdr.csum_flags &= ifp->if_hwassist; 5538 5539 if (ip->ip_len <= ifp->if_mtu || 5540 (ifp->if_hwassist & CSUM_FRAGMENT && 5541 (ip->ip_off & IP_DF) == 0)) { 5542 ip->ip_len = htons(ip->ip_len); 5543 ip->ip_off = htons(ip->ip_off); 5544 ip->ip_sum = 0; 5545 if (sw_csum & CSUM_DELAY_IP) { 5546 /* From KAME */ 5547 if (ip->ip_v == IPVERSION && 5548 (ip->ip_hl << 2) == sizeof(*ip)) { 5549 ip->ip_sum = in_cksum_hdr(ip); 5550 } else { 5551 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); 5552 } 5553 } 5554 lwkt_reltoken(&pf_token); 5555 error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt); 5556 lwkt_gettoken(&pf_token); 5557 goto done; 5558 } 5559 5560 /* 5561 * Too large for interface; fragment if possible. 5562 * Must be able to put at least 8 bytes per fragment. 5563 */ 5564 if (ip->ip_off & IP_DF) { 5565 ipstat.ips_cantfrag++; 5566 if (r->rt != PF_DUPTO) { 5567 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, 5568 ifp->if_mtu); 5569 goto done; 5570 } else 5571 goto bad; 5572 } 5573 5574 m1 = m0; 5575 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum); 5576 if (error) { 5577 goto bad; 5578 } 5579 5580 for (m0 = m1; m0; m0 = m1) { 5581 m1 = m0->m_nextpkt; 5582 m0->m_nextpkt = 0; 5583 if (error == 0) { 5584 lwkt_reltoken(&pf_token); 5585 error = (*ifp->if_output)(ifp, m0, sintosa(dst), 5586 NULL); 5587 lwkt_gettoken(&pf_token); 5588 } else 5589 m_freem(m0); 5590 } 5591 5592 if (error == 0) 5593 ipstat.ips_fragmented++; 5594 5595 done: 5596 if (r->rt != PF_DUPTO) 5597 *m = NULL; 5598 if (ro == &iproute && ro->ro_rt) 5599 RTFREE(ro->ro_rt); 5600 return; 5601 5602 bad: 5603 m_freem(m0); 5604 goto done; 5605 } 5606 #endif /* INET */ 5607 5608 #ifdef INET6 5609 void 5610 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5611 struct pf_state *s, struct pf_pdesc *pd) 5612 { 5613 struct mbuf *m0; 5614 struct route_in6 ip6route; 5615 struct route_in6 *ro; 5616 struct sockaddr_in6 *dst; 5617 struct ip6_hdr *ip6; 5618 struct ifnet *ifp = NULL; 5619 struct pf_addr naddr; 5620 struct pf_src_node *sn = NULL; 5621 int error = 0; 5622 5623 if (m == NULL || *m == NULL || r == NULL || 5624 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5625 panic("pf_route6: invalid parameters"); 5626 5627 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5628 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5629 (*m)->m_pkthdr.pf.routed = 1; 5630 } else { 5631 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5632 m0 = *m; 5633 *m = NULL; 5634 goto bad; 5635 } 5636 } 5637 5638 if (r->rt == PF_DUPTO) { 5639 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) 5640 return; 5641 } else { 5642 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) 5643 return; 5644 m0 = *m; 5645 } 5646 5647 if (m0->m_len < sizeof(struct ip6_hdr)) { 5648 DPFPRINTF(PF_DEBUG_URGENT, 5649 ("pf_route6: m0->m_len < sizeof(struct 
ip6_hdr)\n")); 5650 goto bad; 5651 } 5652 ip6 = mtod(m0, struct ip6_hdr *); 5653 5654 ro = &ip6route; 5655 bzero((caddr_t)ro, sizeof(*ro)); 5656 dst = (struct sockaddr_in6 *)&ro->ro_dst; 5657 dst->sin6_family = AF_INET6; 5658 dst->sin6_len = sizeof(*dst); 5659 dst->sin6_addr = ip6->ip6_dst; 5660 5661 /* 5662 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags, 5663 * so make sure pf.flags is clear. 5664 * 5665 * Cheat. XXX why only in the v6 case??? 5666 */ 5667 if (r->rt == PF_FASTROUTE) { 5668 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 5669 m0->m_pkthdr.pf.flags = 0; 5670 /* XXX Re-Check when Upgrading to > 4.4 */ 5671 m0->m_pkthdr.pf.statekey = NULL; 5672 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 5673 return; 5674 } 5675 5676 if (TAILQ_EMPTY(&r->rpool.list)) { 5677 DPFPRINTF(PF_DEBUG_URGENT, 5678 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n")); 5679 goto bad; 5680 } 5681 if (s == NULL) { 5682 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src, 5683 &naddr, NULL, &sn); 5684 if (!PF_AZERO(&naddr, AF_INET6)) 5685 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 5686 &naddr, AF_INET6); 5687 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; 5688 } else { 5689 if (!PF_AZERO(&s->rt_addr, AF_INET6)) 5690 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 5691 &s->rt_addr, AF_INET6); 5692 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 5693 } 5694 if (ifp == NULL) 5695 goto bad; 5696 5697 if (oifp != ifp) { 5698 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5699 goto bad; 5700 } else if (m0 == NULL) { 5701 goto done; 5702 } 5703 if (m0->m_len < sizeof(struct ip6_hdr)) { 5704 DPFPRINTF(PF_DEBUG_URGENT, 5705 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n")); 5706 goto bad; 5707 } 5708 ip6 = mtod(m0, struct ip6_hdr *); 5709 } 5710 5711 /* 5712 * If the packet is too large for the outgoing interface, 5713 * send back an icmp6 error. 5714 */ 5715 if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr)) 5716 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index); 5717 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) { 5718 error = nd6_output(ifp, ifp, m0, dst, NULL); 5719 } else { 5720 in6_ifstat_inc(ifp, ifs6_in_toobig); 5721 if (r->rt != PF_DUPTO) 5722 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); 5723 else 5724 goto bad; 5725 } 5726 5727 done: 5728 if (r->rt != PF_DUPTO) 5729 *m = NULL; 5730 return; 5731 5732 bad: 5733 m_freem(m0); 5734 goto done; 5735 } 5736 #endif /* INET6 */ 5737 5738 5739 /* 5740 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag 5741 * off is the offset where the protocol header starts 5742 * len is the total length of protocol header plus payload 5743 * returns 0 when the checksum is valid, otherwise returns 1. 5744 */ 5745 /* 5746 * XXX 5747 * FreeBSD supports cksum offload for the following drivers. 5748 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4) 5749 * If we can make full use of it, we would outperform ipfw/ipfilter in 5750 * very heavy traffic. 5751 * I have not tested 'cause I don't have NICs that support cksum offload. 5752 * (There might be problems. Typical phenomena would be 5753 * 1. No route message for UDP packet. 5754 * 2. No connection acceptance from external hosts regardless of rule set.)
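 *
 * Sketch of the fast path this would enable; it mirrors the
 * CSUM_DATA_VALID handling in pf_check_proto_cksum() below and
 * uses only the flags and fields that function already touches:
 *
 *	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
 *		sum = m->m_pkthdr.csum_data;	   value from the NIC
 *		if (!(m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR))
 *			sum = in_pseudo(ip_src, ip_dst,
 *			    htonl(len + sum + proto));
 *		sum ^= 0xffff;		zero now means "checksum ok"
 *	}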
5755 */ 5756 int 5757 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, 5758 sa_family_t af) 5759 { 5760 u_int16_t sum = 0; 5761 int hw_assist = 0; 5762 struct ip *ip; 5763 5764 if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) 5765 return (1); 5766 if (m->m_pkthdr.len < off + len) 5767 return (1); 5768 5769 switch (p) { 5770 case IPPROTO_TCP: 5771 case IPPROTO_UDP: 5772 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 5773 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 5774 sum = m->m_pkthdr.csum_data; 5775 } else { 5776 ip = mtod(m, struct ip *); 5777 sum = in_pseudo(ip->ip_src.s_addr, 5778 ip->ip_dst.s_addr, htonl((u_short)len + 5779 m->m_pkthdr.csum_data + p)); 5780 } 5781 sum ^= 0xffff; 5782 ++hw_assist; 5783 } 5784 break; 5785 case IPPROTO_ICMP: 5786 #ifdef INET6 5787 case IPPROTO_ICMPV6: 5788 #endif /* INET6 */ 5789 break; 5790 default: 5791 return (1); 5792 } 5793 5794 if (!hw_assist) { 5795 switch (af) { 5796 case AF_INET: 5797 if (p == IPPROTO_ICMP) { 5798 if (m->m_len < off) 5799 return (1); 5800 m->m_data += off; 5801 m->m_len -= off; 5802 sum = in_cksum(m, len); 5803 m->m_data -= off; 5804 m->m_len += off; 5805 } else { 5806 if (m->m_len < sizeof(struct ip)) 5807 return (1); 5808 sum = in_cksum_range(m, p, off, len); 5809 if (sum == 0) { 5810 m->m_pkthdr.csum_flags |= 5811 (CSUM_DATA_VALID | 5812 CSUM_PSEUDO_HDR); 5813 m->m_pkthdr.csum_data = 0xffff; 5814 } 5815 } 5816 break; 5817 #ifdef INET6 5818 case AF_INET6: 5819 if (m->m_len < sizeof(struct ip6_hdr)) 5820 return (1); 5821 sum = in6_cksum(m, p, off, len); 5822 /* 5823 * XXX 5824 * IPv6 H/W cksum off-load not supported yet! 5825 * 5826 * if (sum == 0) { 5827 * m->m_pkthdr.csum_flags |= 5828 * (CSUM_DATA_VALID|CSUM_PSEUDO_HDR); 5829 * m->m_pkthdr.csum_data = 0xffff; 5830 *} 5831 */ 5832 break; 5833 #endif /* INET6 */ 5834 default: 5835 return (1); 5836 } 5837 } 5838 if (sum) { 5839 switch (p) { 5840 case IPPROTO_TCP: 5841 tcpstat.tcps_rcvbadsum++; 5842 break; 5843 case IPPROTO_UDP: 5844 udpstat.udps_badsum++; 5845 break; 5846 case IPPROTO_ICMP: 5847 icmpstat.icps_checksum++; 5848 break; 5849 #ifdef INET6 5850 case IPPROTO_ICMPV6: 5851 icmp6stat.icp6s_checksum++; 5852 break; 5853 #endif /* INET6 */ 5854 } 5855 return (1); 5856 } 5857 return (0); 5858 } 5859 5860 struct pf_divert * 5861 pf_find_divert(struct mbuf *m) 5862 { 5863 struct m_tag *mtag; 5864 5865 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) 5866 return (NULL); 5867 5868 return ((struct pf_divert *)(mtag + 1)); 5869 } 5870 5871 struct pf_divert * 5872 pf_get_divert(struct mbuf *m) 5873 { 5874 struct m_tag *mtag; 5875 5876 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) { 5877 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert), 5878 M_NOWAIT); 5879 if (mtag == NULL) 5880 return (NULL); 5881 bzero(mtag + 1, sizeof(struct pf_divert)); 5882 m_tag_prepend(m, mtag); 5883 } 5884 5885 return ((struct pf_divert *)(mtag + 1)); 5886 } 5887 5888 #ifdef INET 5889 int 5890 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, 5891 struct ether_header *eh, struct inpcb *inp) 5892 { 5893 struct pfi_kif *kif; 5894 u_short action, reason = 0, log = 0; 5895 struct mbuf *m = *m0; 5896 struct ip *h = NULL; 5897 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; 5898 struct pf_state *s = NULL; 5899 struct pf_ruleset *ruleset = NULL; 5900 struct pf_pdesc pd; 5901 int off, dirndx, pqid = 0; 5902 5903 if (!pf_status.running) 5904 return (PF_PASS); 5905 5906 memset(&pd, 0, sizeof(pd)); 5907 if (ifp->if_type 
== IFT_CARP && ifp->if_carpdev) 5908 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif; 5909 else 5910 kif = (struct pfi_kif *)ifp->if_pf_kif; 5911 5912 if (kif == NULL) { 5913 DPFPRINTF(PF_DEBUG_URGENT, 5914 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); 5915 return (PF_DROP); 5916 } 5917 if (kif->pfik_flags & PFI_IFLAG_SKIP) 5918 return (PF_PASS); 5919 5920 #ifdef DIAGNOSTIC 5921 if ((m->m_flags & M_PKTHDR) == 0) 5922 panic("non-M_PKTHDR is passed to pf_test"); 5923 #endif /* DIAGNOSTIC */ 5924 5925 if (m->m_pkthdr.len < (int)sizeof(*h)) { 5926 action = PF_DROP; 5927 REASON_SET(&reason, PFRES_SHORT); 5928 log = 1; 5929 goto done; 5930 } 5931 5932 /* 5933 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags, 5934 * so make sure pf.flags is clear. 5935 */ 5936 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED) 5937 return (PF_PASS); 5938 m->m_pkthdr.pf.flags = 0; 5939 /* Re-Check when updating to > 4.4 */ 5940 m->m_pkthdr.pf.statekey = NULL; 5941 5942 /* We do IP header normalization and packet reassembly here */ 5943 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) { 5944 action = PF_DROP; 5945 goto done; 5946 } 5947 m = *m0; /* pf_normalize messes with m0 */ 5948 h = mtod(m, struct ip *); 5949 5950 off = h->ip_hl << 2; 5951 if (off < (int)sizeof(*h)) { 5952 action = PF_DROP; 5953 REASON_SET(&reason, PFRES_SHORT); 5954 log = 1; 5955 goto done; 5956 } 5957 5958 pd.src = (struct pf_addr *)&h->ip_src; 5959 pd.dst = (struct pf_addr *)&h->ip_dst; 5960 pd.sport = pd.dport = NULL; 5961 pd.ip_sum = &h->ip_sum; 5962 pd.proto_sum = NULL; 5963 pd.proto = h->ip_p; 5964 pd.dir = dir; 5965 pd.sidx = (dir == PF_IN) ? 0 : 1; 5966 pd.didx = (dir == PF_IN) ? 1 : 0; 5967 pd.af = AF_INET; 5968 pd.tos = h->ip_tos; 5969 pd.tot_len = h->ip_len; 5970 pd.eh = eh; 5971 5972 /* handle fragments that didn't get reassembled by normalization */ 5973 if (h->ip_off & (IP_MF | IP_OFFMASK)) { 5974 action = pf_test_fragment(&r, dir, kif, m, h, 5975 &pd, &a, &ruleset); 5976 goto done; 5977 } 5978 5979 switch (h->ip_p) { 5980 5981 case IPPROTO_TCP: { 5982 struct tcphdr th; 5983 5984 pd.hdr.tcp = &th; 5985 if (!pf_pull_hdr(m, off, &th, sizeof(th), 5986 &action, &reason, AF_INET)) { 5987 log = action != PF_PASS; 5988 goto done; 5989 } 5990 pd.p_len = pd.tot_len - off - (th.th_off << 2); 5991 if ((th.th_flags & TH_ACK) && pd.p_len == 0) 5992 pqid = 1; 5993 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 5994 if (action == PF_DROP) 5995 goto done; 5996 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 5997 &reason); 5998 if (action == PF_PASS) { 5999 pfsync_update_state(s); 6000 r = s->rule.ptr; 6001 a = s->anchor.ptr; 6002 log = s->log; 6003 } else if (s == NULL) 6004 action = pf_test_rule(&r, &s, dir, kif, 6005 m, off, h, &pd, &a, &ruleset, NULL, inp); 6006 break; 6007 } 6008 6009 case IPPROTO_UDP: { 6010 struct udphdr uh; 6011 6012 pd.hdr.udp = &uh; 6013 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 6014 &action, &reason, AF_INET)) { 6015 log = action != PF_PASS; 6016 goto done; 6017 } 6018 if (uh.uh_dport == 0 || 6019 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 6020 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 6021 action = PF_DROP; 6022 REASON_SET(&reason, PFRES_SHORT); 6023 goto done; 6024 } 6025 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 6026 if (action == PF_PASS) { 6027 pfsync_update_state(s); 6028 r = s->rule.ptr; 6029 a = s->anchor.ptr; 6030 log = s->log; 6031 } else if (s == NULL) 6032 action = pf_test_rule(&r, &s, dir, kif, 6033 m, off, h, &pd, &a, &ruleset,
NULL, inp); 6034 break; 6035 } 6036 6037 case IPPROTO_ICMP: { 6038 struct icmp ih; 6039 6040 pd.hdr.icmp = &ih; 6041 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN, 6042 &action, &reason, AF_INET)) { 6043 log = action != PF_PASS; 6044 goto done; 6045 } 6046 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, 6047 &reason); 6048 if (action == PF_PASS) { 6049 pfsync_update_state(s); 6050 r = s->rule.ptr; 6051 a = s->anchor.ptr; 6052 log = s->log; 6053 } else if (s == NULL) 6054 action = pf_test_rule(&r, &s, dir, kif, 6055 m, off, h, &pd, &a, &ruleset, NULL, inp); 6056 break; 6057 } 6058 6059 default: 6060 action = pf_test_state_other(&s, dir, kif, m, &pd); 6061 if (action == PF_PASS) { 6062 pfsync_update_state(s); 6063 r = s->rule.ptr; 6064 a = s->anchor.ptr; 6065 log = s->log; 6066 } else if (s == NULL) 6067 action = pf_test_rule(&r, &s, dir, kif, m, off, h, 6068 &pd, &a, &ruleset, NULL, inp); 6069 break; 6070 } 6071 6072 done: 6073 if (action == PF_PASS && h->ip_hl > 5 && 6074 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 6075 action = PF_DROP; 6076 REASON_SET(&reason, PFRES_IPOPTIONS); 6077 log = 1; 6078 DPFPRINTF(PF_DEBUG_MISC, 6079 ("pf: dropping packet with ip options\n")); 6080 } 6081 6082 if ((s && s->tag) || r->rtableid) 6083 pf_tag_packet(m, s ? s->tag : 0, r->rtableid); 6084 6085 #if 0 6086 if (dir == PF_IN && s && s->key[PF_SK_STACK]) 6087 m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK]; 6088 #endif 6089 6090 #ifdef ALTQ 6091 if (action == PF_PASS && r->qid) { 6092 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 6093 if (pqid || (pd.tos & IPTOS_LOWDELAY)) 6094 m->m_pkthdr.pf.qid = r->pqid; 6095 else 6096 m->m_pkthdr.pf.qid = r->qid; 6097 m->m_pkthdr.pf.ecn_af = AF_INET; 6098 m->m_pkthdr.pf.hdr = h; 6099 /* add connection hash for fairq */ 6100 if (s) { 6101 /* for fairq */ 6102 m->m_pkthdr.pf.state_hash = s->hash; 6103 m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED; 6104 } 6105 } 6106 #endif /* ALTQ */ 6107 6108 /* 6109 * connections redirected to loopback should not match sockets 6110 * bound specifically to loopback due to security implications, 6111 * see tcp_input() and in_pcblookup_listen(). 
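 *
 * Informally: a passed inbound TCP/UDP packet whose state was
 * created by an rdr or binat rule and whose translated
 * destination falls in 127/8 (the IN_LOOPBACKNET test below) is
 * tagged PF_TAG_TRANSLATE_LOCALHOST, so the stack can tell an
 * intentional pf redirect apart from a spoofed loopback
 * destination.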
6112 */ 6113 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 6114 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 6115 (s->nat_rule.ptr->action == PF_RDR || 6116 s->nat_rule.ptr->action == PF_BINAT) && 6117 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) 6118 m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST; 6119 6120 if (dir == PF_IN && action == PF_PASS && r->divert.port) { 6121 struct pf_divert *divert; 6122 6123 if ((divert = pf_get_divert(m))) { 6124 m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED; 6125 divert->port = r->divert.port; 6126 divert->addr.ipv4 = r->divert.addr.v4; 6127 } 6128 } 6129 6130 if (log) { 6131 struct pf_rule *lr; 6132 6133 if (s != NULL && s->nat_rule.ptr != NULL && 6134 s->nat_rule.ptr->log & PF_LOG_ALL) 6135 lr = s->nat_rule.ptr; 6136 else 6137 lr = r; 6138 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset, 6139 &pd); 6140 } 6141 6142 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 6143 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++; 6144 6145 if (action == PF_PASS || r->action == PF_DROP) { 6146 dirndx = (dir == PF_OUT); 6147 r->packets[dirndx]++; 6148 r->bytes[dirndx] += pd.tot_len; 6149 if (a != NULL) { 6150 a->packets[dirndx]++; 6151 a->bytes[dirndx] += pd.tot_len; 6152 } 6153 if (s != NULL) { 6154 if (s->nat_rule.ptr != NULL) { 6155 s->nat_rule.ptr->packets[dirndx]++; 6156 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 6157 } 6158 if (s->src_node != NULL) { 6159 s->src_node->packets[dirndx]++; 6160 s->src_node->bytes[dirndx] += pd.tot_len; 6161 } 6162 if (s->nat_src_node != NULL) { 6163 s->nat_src_node->packets[dirndx]++; 6164 s->nat_src_node->bytes[dirndx] += pd.tot_len; 6165 } 6166 dirndx = (dir == s->direction) ? 0 : 1; 6167 s->packets[dirndx]++; 6168 s->bytes[dirndx] += pd.tot_len; 6169 } 6170 tr = r; 6171 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 6172 if (nr != NULL && r == &pf_default_rule) 6173 tr = nr; 6174 if (tr->src.addr.type == PF_ADDR_TABLE) 6175 pfr_update_stats(tr->src.addr.p.tbl, 6176 (s == NULL) ? pd.src : 6177 &s->key[(s->direction == PF_IN)]-> 6178 addr[(s->direction == PF_OUT)], 6179 pd.af, pd.tot_len, dir == PF_OUT, 6180 r->action == PF_PASS, tr->src.neg); 6181 if (tr->dst.addr.type == PF_ADDR_TABLE) 6182 pfr_update_stats(tr->dst.addr.p.tbl, 6183 (s == NULL) ? 
	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_OUT)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_IN)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route can free the mbuf causing *m0 to become NULL */
		pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET */

#ifdef INET6
int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct inpcb *inp)
{
	struct pfi_kif		*kif;
	u_short			 action, reason = 0, log = 0;
	struct mbuf		*m = *m0, *n = NULL;
	struct ip6_hdr		*h = NULL;
	struct pf_rule		*a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state		*s = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_pdesc		 pd;
	int			 off, terminal = 0, dirndx, rh_cnt = 0;

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
	if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
		kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
	else
		kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test6");
#endif /* DIAGNOSTIC */

	if (m->m_pkthdr.len < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
		return (PF_PASS);
	m->m_pkthdr.pf.flags = 0;
	/* Re-check when updating to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip6_hdr *);

#if 1
	/*
	 * we do not support jumbograms yet.  if we keep going, a zero
	 * ip6_plen will do something bad, so drop the packet for now.
	 */
	if (htons(h->ip6_plen) == 0) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
		goto done;
	}
#endif

	pd.src = (struct pf_addr *)&h->ip6_src;
	pd.dst = (struct pf_addr *)&h->ip6_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = NULL;
	pd.proto_sum = NULL;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET6;
	pd.tos = 0;
	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	pd.eh = eh;

	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
	pd.proto = h->ip6_nxt;
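	/*
	 * Walk the IPv6 extension header chain until a terminal (upper
	 * layer) protocol is found, advancing off and pd.proto as we go.
	 * Fragments are handed to pf_test_fragment().  A second routing
	 * header, or a type 0 (source routing) header, is rejected
	 * outright.  Note the length units: AH's length field counts
	 * 32-bit words minus two, while the generic option headers count
	 * 8-octet units not including the first eight octets.
	 */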
	do {
		switch (pd.proto) {
		case IPPROTO_FRAGMENT:
			action = pf_test_fragment(&r, dir, kif, m, h,
			    &pd, &a, &ruleset);
			if (action == PF_DROP)
				REASON_SET(&reason, PFRES_FRAG);
			goto done;
		case IPPROTO_ROUTING: {
			struct ip6_rthdr rthdr;

			if (rh_cnt++) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 more than one rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
			    &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				log = 1;
				goto done;
			}
			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 rthdr0\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			/* FALLTHROUGH */
		}
		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext	opt6;

			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH)
				off += (opt6.ip6e_len + 2) * 4;
			else
				off += (opt6.ip6e_len + 1) * 8;
			pd.proto = opt6.ip6e_nxt;
			/* go to the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);

	/* if there's no routing header, use unmodified mbuf for checksumming */
	if (!n)
		n = m;
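	/*
	 * Per-protocol dispatch: pull the transport header, then try to
	 * match an existing state entry first.  Only when no state exists
	 * is the ruleset evaluated via pf_test_rule().  On a state match,
	 * rule, anchor and log flags are inherited from the state and the
	 * peer is notified through pfsync_update_state().
	 */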
	switch (pd.proto) {

	case IPPROTO_TCP: {
		struct tcphdr	th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr	uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_ICMPV6: {
		struct icmp6_hdr	ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif,
		    m, off, h, &pd, &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, NULL, inp);
		break;
	}

done:
	if (n != m) {
		m_freem(n);
		n = NULL;
	}

	/* handle dangerous IPv6 extension headers. */
	if (action == PF_PASS && rh_cnt &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with dangerous v6 headers\n"));
	}

	if ((s && s->tag) || r->rtableid)
		pf_tag_packet(m, s ? s->tag : 0, r->rtableid);

#if 0
	if (dir == PF_IN && s && s->key[PF_SK_STACK])
		m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#endif

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		if (pd.tos & IPTOS_LOWDELAY)
			m->m_pkthdr.pf.qid = r->pqid;
		else
			m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = AF_INET6;
		m->m_pkthdr.pf.hdr = h;
		if (s) {
			/* for fairq */
			m->m_pkthdr.pf.state_hash = s->hash;
			m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
		}
	}
#endif /* ALTQ */

	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
		m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (dir == PF_IN && action == PF_PASS && r->divert.port) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(m))) {
			m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->port = r->divert.port;
			divert->addr.ipv6 = r->divert.addr.v6;
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
		    &pd);
	}
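	/*
	 * Interface counters are indexed [af][dir][result]: 0/1 for
	 * IPv4/IPv6, PF_IN/PF_OUT, and passed/blocked respectively.
	 */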
	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->addr[0],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->addr[1],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route6 can free the mbuf causing *m0 to become NULL */
		pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET6 */

/*
 * Stub: congestion is never reported on DragonFly; callers always see
 * an uncongested queue.
 */
int
pf_check_congestion(struct ifqueue *ifq)
{
	return (0);
}
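/*
 * Usage sketch (assumption, not part of this file): the entry points
 * above are normally reached through the packet filter hooks that
 * pf_ioctl.c registers when pf is enabled, roughly like:
 *
 *	static int
 *	pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir)
 *	{
 *		int chk = pf_test(PF_IN, ifp, m, NULL, NULL);
 *
 *		if (chk != PF_PASS && *m != NULL) {
 *			m_freem(*m);
 *			*m = NULL;
 *		}
 *		return (chk != PF_PASS ? EACCES : 0);
 *	}
 *
 * The real hook signature and registration live in pf_ioctl.c; the
 * fragment above only illustrates how a caller consumes the PF_PASS /
 * PF_DROP return value and the possibly-NULLed mbuf pointer.
 */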