/*	$OpenBSD: pf.c,v 1.614 2008/08/02 12:34:37 henning Exp $ */

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/spinlock.h>

#include <machine/inttypes.h>

#include <sys/md5.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/netisr2.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <netinet/if_ether.h>

#include <net/pf/pfvar.h>
#include <net/pf/if_pflog.h>

#include <net/pf/if_pfsync.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <sys/in_cksum.h>
#include <sys/ucred.h>
#include <machine/limits.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <net/netmsg2.h>
#include <net/toeplitz2.h>

extern int ip_optcopy(struct ip *, struct ip *);
extern int debug_pfugidhack;

struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);
struct lwkt_token pf_secret_token = LWKT_TOKEN_INITIALIZER(pf_secret_token);
struct spinlock pf_spin = SPINLOCK_INITIALIZER(pf_spin);

#define DPFPRINTF(n, x)	if (pf_status.debug >= (n)) kprintf x

/*
 * Global variables
 */

/* mask radix tree */
struct radix_node_head	*pf_maskhead;

/* state tables */
struct pf_state_tree	 pf_statetbl[MAXCPU];

struct pf_altqqueue	 pf_altqs[2];
struct pf_palist	 pf_pabuf;
struct pf_altqqueue	*pf_altqs_active;
struct pf_altqqueue	*pf_altqs_inactive;
struct pf_status	 pf_status;

u_int32_t		 ticket_altqs_active;
u_int32_t		 ticket_altqs_inactive;
int			 altqs_inactive_open;
u_int32_t		 ticket_pabuf;

MD5_CTX			 pf_tcp_secret_ctx;
u_char			 pf_tcp_secret[16];
int			 pf_tcp_secret_init;
int			 pf_tcp_iss_off;

struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_rule		*r;
	struct pf_anchor_node	*parent;
	struct pf_anchor	*child;
} pf_anchor_stack[64];

struct malloc_type	*pf_src_tree_pl, *pf_rule_pl, *pf_pooladdr_pl;
struct malloc_type	*pf_state_pl, *pf_state_key_pl, *pf_state_item_pl;
struct malloc_type	*pf_altq_pl;

void			 pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

void			 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
void			 pf_add_threshold(struct pf_threshold *);
int			 pf_check_threshold(struct pf_threshold *);

void			 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
int			 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
#ifdef INET6
void			 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
#endif /* INET6 */
void			 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
void			 pf_send_tcp(const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ether_header *, struct ifnet *);
void			 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
struct pf_rule		*pf_match_translation(struct pf_pdesc *, struct mbuf *,
			    int, int, struct pfi_kif *,
			    struct pf_addr *, u_int16_t, struct pf_addr *,
			    u_int16_t, int);
struct pf_rule		*pf_get_translation(struct pf_pdesc *, struct mbuf *,
			    int, int, struct pfi_kif *, struct pf_src_node **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t);
void			 pf_detach_state(struct pf_state *);
int			 pf_state_key_setup(struct pf_pdesc *, struct pf_rule *,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_state_key **, struct pf_state_key **,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t);
void			 pf_state_key_detach(struct pf_state *, int);
u_int32_t		 pf_tcp_iss(struct pf_pdesc *);
int			 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct ifqueue *,
			    struct inpcb *);
static __inline int	 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
int			 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
int			 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
int			 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
int			 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
int			 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
int			 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
int			 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
void			 pf_step_into_anchor(int *, struct pf_ruleset **, int,
			    struct pf_rule **, struct pf_rule **, int *);
int			 pf_step_out_of_anchor(int *, struct pf_ruleset **,
			    int, struct pf_rule **, struct pf_rule **,
			    int *);
void			 pf_hash(struct pf_addr *, struct pf_addr *,
			    struct pf_poolhashkey *, sa_family_t);
int			 pf_map_addr(u_int8_t, struct pf_rule *,
			    struct pf_addr *, struct pf_addr *,
			    struct pf_addr *, struct pf_src_node **);
int			 pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
			    struct pf_addr *, struct pf_addr *,
			    u_int16_t, u_int16_t,
			    struct pf_addr *, u_int16_t *,
			    u_int16_t, u_int16_t,
			    struct pf_src_node **);
void			 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
void			 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
u_int16_t		 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
u_int16_t		 pf_calc_mss(struct pf_addr *, sa_family_t,
			    u_int16_t);
void			 pf_set_rt_ifp(struct pf_state *,
			    struct pf_addr *);
int			 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
struct pf_divert	*pf_get_divert(struct mbuf *);
void			 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
int			 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
struct pf_state		*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int, struct mbuf *);
int			 pf_src_connlimit(struct pf_state **);
int			 pf_check_congestion(struct ifqueue *);

extern int pf_end_threads;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl, PFSTATE_HIWAT },
	{ &pf_src_tree_pl, PFSNODE_HIWAT },
	{ &pf_frent_pl, PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl, PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl, PFR_KENTRY_HIWAT }
};

#define STATE_LOOKUP(i, k, d, s, m)				\
	do {							\
		s = pf_find_state(i, k, d, m);			\
		if (s == NULL || (s)->timeout == PFTM_PURGE)	\
			return (PF_DROP);			\
		if (d == PF_OUT &&				\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&	\
		    (s)->rule.ptr->direction == PF_OUT) ||	\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&		\
		    (s)->rule.ptr->direction == PF_IN)) &&	\
		    (s)->rt_kif != NULL &&			\
		    (s)->rt_kif != i)				\
			return (PF_PASS);			\
	} while (0)
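/*
 * A state created by a rule carrying PFRULE_IFBOUND is bound to the
 * interface (kif) it was created on and only matches packets on that
 * interface; all other states are attached to the wildcard kif pfi_all
 * and "float" across interfaces.  BOUND_IFACE below picks the kif to
 * attach a new state to accordingly.
 */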
#define BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

#define STATE_INC_COUNTERS(s)						\
	do {								\
		atomic_add_int(&s->rule.ptr->states_cur, 1);		\
		s->rule.ptr->states_tot++;				\
		if (s->anchor.ptr != NULL) {				\
			atomic_add_int(&s->anchor.ptr->states_cur, 1);	\
			s->anchor.ptr->states_tot++;			\
		}							\
		if (s->nat_rule.ptr != NULL) {				\
			atomic_add_int(&s->nat_rule.ptr->states_cur, 1); \
			s->nat_rule.ptr->states_tot++;			\
		}							\
	} while (0)

#define STATE_DEC_COUNTERS(s)						\
	do {								\
		if (s->nat_rule.ptr != NULL)				\
			atomic_add_int(&s->nat_rule.ptr->states_cur, -1); \
		if (s->anchor.ptr != NULL)				\
			atomic_add_int(&s->anchor.ptr->states_cur, -1);	\
		atomic_add_int(&s->rule.ptr->states_cur, -1);		\
	} while (0)

static MALLOC_DEFINE(M_PFSTATEPL, "pfstatepl", "pf state pool list");
static MALLOC_DEFINE(M_PFSRCTREEPL, "pfsrctpl", "pf source tree pool list");
static MALLOC_DEFINE(M_PFSTATEKEYPL, "pfstatekeypl", "pf state key pool list");
static MALLOC_DEFINE(M_PFSTATEITEMPL, "pfstateitempl",
		     "pf state item pool list");

static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
static __inline int pf_state_compare_key(struct pf_state_key *,
			struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
			struct pf_state *);

struct pf_src_tree tree_src_tracking[MAXCPU];
struct pf_state_tree_id tree_id[MAXCPU];
struct pf_state_queue state_list[MAXCPU];

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
RB_GENERATE(pf_state_tree_id, pf_state,
    entry_id, pf_state_compare_id);

static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
	int	diff;

	if (a->rule.ptr > b->rule.ptr)
		return (1);
	if (a->rule.ptr < b->rule.ptr)
		return (-1);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr.addr32[3] > b->addr.addr32[3])
			return (1);
		if (a->addr.addr32[3] < b->addr.addr32[3])
			return (-1);
		if (a->addr.addr32[2] > b->addr.addr32[2])
			return (1);
		if (a->addr.addr32[2] < b->addr.addr32[2])
			return (-1);
		if (a->addr.addr32[1] > b->addr.addr32[1])
			return (1);
		if (a->addr.addr32[1] < b->addr.addr32[1])
			return (-1);
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}
	return (0);
}

u_int32_t
pf_state_hash(struct pf_state_key *sk)
{
	u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15));

	if (hv == 0)	/* disallow 0 */
		hv = 1;
	return (hv);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */

void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_second;
}

void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_second, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
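/*
 * Worked example of the rate tracking above (assuming the stock
 * PF_THRESHOLD_MULT scale factor of 1000): "max-src-conn-rate 10/5"
 * gives limit = 10000, seconds = 5.  Each new connection adds 1000 to
 * count, and before each addition the existing count is first aged by
 * count * diff / seconds, i.e. it decays linearly to zero over the
 * configured window.  pf_check_threshold() then simply compares the
 * scaled count against the scaled limit.
 */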
int
pf_src_connlimit(struct pf_state **state)
{
	int bad = 0;
	int cpu = mycpu->gd_cpuid;

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	if ((*state)->rule.ptr->overload_tbl) {
		struct pfr_addr p;
		u_int32_t	killed = 0;

		pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf_src_connlimit: blocking address ");
			pf_print_host(&(*state)->src_node->addr, 0,
			    (*state)->key[PF_SK_WIRE]->af);
		}

		bzero(&p, sizeof(p));
		p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
		switch ((*state)->key[PF_SK_WIRE]->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = (*state)->src_node->addr.v4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = (*state)->src_node->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
		    &p, time_second);

		/* kill existing states if that's required. */
		if ((*state)->rule.ptr->flush) {
			struct pf_state_key *sk;
			struct pf_state *st;

			pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
			RB_FOREACH(st, pf_state_tree_id, &tree_id[cpu]) {
				sk = st->key[PF_SK_WIRE];
				/*
				 * Kill states from this source.  (Only those
				 * from the same rule if PF_FLUSH_GLOBAL is not
				 * set).  (Only on current cpu).
				 */
				if (sk->af ==
				    (*state)->key[PF_SK_WIRE]->af &&
				    (((*state)->direction == PF_OUT &&
				    PF_AEQ(&(*state)->src_node->addr,
					&sk->addr[0], sk->af)) ||
				    ((*state)->direction == PF_IN &&
				    PF_AEQ(&(*state)->src_node->addr,
					&sk->addr[1], sk->af))) &&
				    ((*state)->rule.ptr->flush &
				    PF_FLUSH_GLOBAL ||
				    (*state)->rule.ptr == st->rule.ptr)) {
					st->timeout = PFTM_PURGE;
					st->src.state = st->dst.state =
					    TCPS_CLOSED;
					killed++;
				}
			}
			if (pf_status.debug >= PF_DEBUG_MISC)
				kprintf(", %u states killed", killed);
		}
		if (pf_status.debug >= PF_DEBUG_MISC)
			kprintf("\n");
	}

	/* kill this state */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
	return (1);
}

int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{
	struct pf_src_node	k;
	int cpu = mycpu->gd_cpuid;

	if (*sn == NULL) {
		k.af = af;
		PF_ACPY(&k.addr, src, af);
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = rule;
		else
			k.rule.ptr = NULL;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k);
	}
	if (*sn == NULL) {
		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = kmalloc(sizeof(struct pf_src_node),
					M_PFSRCTREEPL, M_NOWAIT|M_ZERO);
		else
			pf_status.lcounters[LCNT_SRCNODES]++;
		if ((*sn) == NULL)
			return (-1);

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			(*sn)->rule.ptr = rule;
		else
			(*sn)->rule.ptr = NULL;
		PF_ACPY(&(*sn)->addr, src, af);
		if (RB_INSERT(pf_src_tree,
		    &tree_src_tracking[cpu], *sn) != NULL) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				kprintf("pf: src_tree insert failed: ");
				pf_print_host(&(*sn)->addr, 0, af);
				kprintf("\n");
			}
			kfree(*sn, M_PFSRCTREEPL);
			return (-1);
		}

		/*
		 * Atomic op required to increment src_nodes in the rule
		 * because we hold a shared token here (decrements will use
		 * an exclusive token).
		 */
		(*sn)->creation = time_second;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			atomic_add_int(&(*sn)->rule.ptr->src_nodes, 1);
		pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		atomic_add_int(&pf_status.src_nodes, 1);
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			pf_status.lcounters[LCNT_SRCSTATES]++;
			return (-1);
		}
	}
	return (0);
}

/* state table stuff */

static __inline int
pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
{
	int	diff;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#ifdef INET
	case AF_INET:
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
			return (1);
		if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
			return (-1);
		if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
			return (1);
		if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
			return (-1);
		if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
			return (1);
		if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
			return (-1);
		if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
			return (1);
		if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
			return (-1);
		if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
			return (1);
		if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
			return (-1);
		if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
			return (1);
		if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
			return (-1);
		if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
			return (1);
		if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
			return (-1);
		if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
			return (1);
		if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}

	if ((diff = a->port[0] - b->port[0]) != 0)
		return (diff);
	if ((diff = a->port[1] - b->port[1]) != 0)
		return (diff);

	return (0);
}

static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
	if (a->id > b->id)
		return (1);
	if (a->id < b->id)
		return (-1);
	if (a->creatorid > b->creatorid)
		return (1);
	if (a->creatorid < b->creatorid)
		return (-1);

	return (0);
}

int
pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
{
	struct pf_state_item	*si;
	struct pf_state_key	*cur;
	int cpu = mycpu->gd_cpuid;

	KKASSERT(s->key[idx] == NULL);	/* XXX handle this? */

	if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl[cpu], sk)) != NULL) {
		/* key exists. check for same kif, if none, add to key */
		TAILQ_FOREACH(si, &cur->states, entry)
			if (si->s->kif == s->kif &&
			    si->s->direction == s->direction) {
				if (pf_status.debug >= PF_DEBUG_MISC) {
					kprintf(
					    "pf: %s key attach failed on %s: ",
					    (idx == PF_SK_WIRE) ?
					    "wire" : "stack",
					    s->kif->pfik_name);
					pf_print_state_parts(s,
					    (idx == PF_SK_WIRE) ? sk : NULL,
					    (idx == PF_SK_STACK) ? sk : NULL);
					kprintf("\n");
				}
				kfree(sk, M_PFSTATEKEYPL);
				return (-1);	/* collision! */
			}
		kfree(sk, M_PFSTATEKEYPL);

		s->key[idx] = cur;
	} else
		s->key[idx] = sk;

	if ((si = kmalloc(sizeof(struct pf_state_item),
			  M_PFSTATEITEMPL, M_NOWAIT)) == NULL) {
		pf_state_key_detach(s, idx);
		return (-1);
	}
	si->s = s;

	/* list is sorted, if-bound states before floating */
	if (s->kif == pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);
	return (0);
}

void
pf_detach_state(struct pf_state *s)
{
	if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
		s->key[PF_SK_WIRE] = NULL;

	if (s->key[PF_SK_STACK] != NULL)
		pf_state_key_detach(s, PF_SK_STACK);

	if (s->key[PF_SK_WIRE] != NULL)
		pf_state_key_detach(s, PF_SK_WIRE);
}

void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_item	*si;
	int cpu = mycpu->gd_cpuid;

	si = TAILQ_FIRST(&s->key[idx]->states);
	while (si && si->s != s)
		si = TAILQ_NEXT(si, entry);

	if (si) {
		TAILQ_REMOVE(&s->key[idx]->states, si, entry);
		kfree(si, M_PFSTATEITEMPL);
	}

	if (TAILQ_EMPTY(&s->key[idx]->states)) {
		RB_REMOVE(pf_state_tree, &pf_statetbl[cpu], s->key[idx]);
		if (s->key[idx]->reverse)
			s->key[idx]->reverse->reverse = NULL;
		if (s->key[idx]->inp)
			s->key[idx]->inp->inp_pf_sk = NULL;
		kfree(s->key[idx], M_PFSTATEKEYPL);
	}
	s->key[idx] = NULL;
}

struct pf_state_key *
pf_alloc_state_key(int pool_flags)
{
	struct pf_state_key *sk;

	if ((sk = kmalloc(sizeof(struct pf_state_key),
			  M_PFSTATEKEYPL, pool_flags)) == NULL)
		return (NULL);
	TAILQ_INIT(&sk->states);

	return (sk);
}

int
pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
	struct pf_state_key **skw, struct pf_state_key **sks,
	struct pf_state_key **skp, struct pf_state_key **nkp,
	struct pf_addr *saddr, struct pf_addr *daddr,
	u_int16_t sport, u_int16_t dport)
{
	KKASSERT((*skp == NULL && *nkp == NULL));

	if ((*skp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
	(*skp)->port[pd->sidx] = sport;
	(*skp)->port[pd->didx] = dport;
	(*skp)->proto = pd->proto;
	(*skp)->af = pd->af;

	if (nr != NULL) {
		if ((*nkp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
			return (ENOMEM); /* caller must handle cleanup */

		/* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
		PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
		PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
		(*nkp)->port[0] = (*skp)->port[0];
		(*nkp)->port[1] = (*skp)->port[1];
		(*nkp)->proto = pd->proto;
		(*nkp)->af = pd->af;
	} else
		*nkp = *skp;

	if (pd->dir == PF_IN) {
		*skw = *skp;
		*sks = *nkp;
	} else {
		*sks = *skp;
		*skw = *nkp;
	}
	return (0);
}
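/*
 * Hypothetical example for pf_state_key_setup() above: for an inbound
 * "rdr ... 1.2.3.4 port 80 -> 10.0.0.4 port 8080" rule, two keys are
 * built.  The wire key (skw) holds the addresses as seen on the wire
 * (client -> 1.2.3.4:80) while the stack key (sks) holds the addresses
 * after translation (client -> 10.0.0.4:8080), once the NAT code has
 * rewritten *nkp.  Without a nat/rdr rule (nr == NULL) a single key is
 * shared and *skp == *nkp.
 */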
/*
 * Insert pf_state with one or two state keys (allowing a reverse path lookup
 * which is used by NAT).  In the NAT case skw is the initiator (?) and
 * sks is the target.
 */
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
		struct pf_state_key *sks, struct pf_state *s)
{
	int cpu = mycpu->gd_cpuid;

	s->kif = kif;
	s->cpuid = cpu;

	if (skw == sks) {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE))
			return (-1);
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
	} else {
		if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
			kfree(sks, M_PFSTATEKEYPL);
			return (-1);
		}
		if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
			pf_state_key_detach(s, PF_SK_WIRE);
			return (-1);
		}
	}

	if (s->id == 0 && s->creatorid == 0) {
		u_int64_t sid;

		if (sizeof(long) == 8) {
			sid = atomic_fetchadd_long(&pf_status.stateid, 1);
		} else {
			spin_lock(&pf_spin);
			sid = pf_status.stateid++;
			spin_unlock(&pf_spin);
		}
		s->id = htobe64(sid);
		s->creatorid = pf_status.hostid;
	}

	/*
	 * Calculate hash code for altq
	 */
	s->hash = crc32(s->key[PF_SK_WIRE], sizeof(*sks));

	if (RB_INSERT(pf_state_tree_id, &tree_id[cpu], s) != NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			kprintf("pf: state insert failed: "
			    "id: %016jx creatorid: %08x",
			    (uintmax_t)be64toh(s->id), ntohl(s->creatorid));
			if (s->sync_flags & PFSTATE_FROMSYNC)
				kprintf(" (from sync)");
			kprintf("\n");
		}
		pf_detach_state(s);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&state_list[cpu], s, entry_list);
	pf_status.fcounters[FCNT_STATE_INSERT]++;
	atomic_add_int(&pf_status.states, 1);
	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
	pfsync_insert_state(s);
	return (0);
}

struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
	int cpu = mycpu->gd_cpuid;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	return (RB_FIND(pf_state_tree_id, &tree_id[cpu],
			(struct pf_state *)key));
}

struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
	      struct mbuf *m)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si;
	int cpu = mycpu->gd_cpuid;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
	    ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse)
		sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
	else {
		if ((sk = RB_FIND(pf_state_tree, &pf_statetbl[cpu],
				  (struct pf_state_key *)key)) == NULL)
			return (NULL);
		if (dir == PF_OUT && m->m_pkthdr.pf.statekey) {
			((struct pf_state_key *)
			    m->m_pkthdr.pf.statekey)->reverse = sk;
			sk->reverse = m->m_pkthdr.pf.statekey;
		}
	}

	if (dir == PF_OUT)
		m->m_pkthdr.pf.statekey = NULL;

	/* list is sorted, if-bound states before floating ones */
	TAILQ_FOREACH(si, &sk->states, entry)
		if ((si->s->kif == pfi_all || si->s->kif == kif) &&
		    sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
			   si->s->key[PF_SK_STACK]))
			return (si->s);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si, *ret = NULL;
	int cpu = mycpu->gd_cpuid;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	sk = RB_FIND(pf_state_tree, &pf_statetbl[cpu],
		     (struct pf_state_key *)key);

	if (sk != NULL) {
		TAILQ_FOREACH(si, &sk->states, entry)
			if (dir == PF_INOUT ||
			    (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
				    si->s->key[PF_SK_STACK]))) {
				if (more == NULL)
					return (si->s);

				if (ret)
					(*more)++;
				else
					ret = si;
			}
	}
	return (ret ? ret->s : NULL);
}

/* END state table stuff */

void
pf_purge_thread(void *v)
{
	globaldata_t save_gd = mycpu;
	int nloops = 0;
	int locked = 0;
	int nn;
	int endingit;

	for (;;) {
		tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);

		endingit = pf_end_threads;

		for (nn = 0; nn < ncpus; ++nn) {
			lwkt_setcpu_self(globaldata_find(nn));

			lwkt_gettoken(&pf_token);
			lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
			crit_enter();

			/*
			 * process a fraction of the state table every second
			 */
			if (!pf_purge_expired_states(
				1 + (pf_status.states /
				     pf_default_rule.timeout[PFTM_INTERVAL]),
				0)) {
				pf_purge_expired_states(
					1 + (pf_status.states /
					     pf_default_rule.timeout[
						PFTM_INTERVAL]), 1);
			}

			/*
			 * purge other expired types every PFTM_INTERVAL
			 * seconds
			 */
			if (++nloops >=
			    pf_default_rule.timeout[PFTM_INTERVAL]) {
				pf_purge_expired_fragments();
				if (!pf_purge_expired_src_nodes(locked)) {
					pf_purge_expired_src_nodes(1);
				}
				nloops = 0;
			}

			/*
			 * If terminating the thread, clean everything out
			 * (on all cpus).
			 */
			if (endingit) {
				pf_purge_expired_states(pf_status.states, 0);
				pf_purge_expired_fragments();
				pf_purge_expired_src_nodes(1);
			}

			crit_exit();
			lockmgr(&pf_consistency_lock, LK_RELEASE);
			lwkt_reltoken(&pf_token);
		}
		lwkt_setcpu_self(save_gd);
		if (endingit)
			break;
	}

	/*
	 * Thread termination
	 */
	pf_end_threads++;
	wakeup(pf_purge_thread);
	kthread_exit();
}

u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_second);
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	KKASSERT(state->timeout != PFTM_UNLINKED);
	KKASSERT(state->timeout < PFTM_MAX);
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states_cur;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
				(end - start));
		else
			return (time_second);
	}
	return (state->expire + timeout);
}
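/*
 * Example of the adaptive scaling above (hypothetical numbers): with
 * adaptive.start = 6000, adaptive.end = 12000 and a nominal timeout of
 * 3600s, 9000 tracked states scale the timeout to
 * 3600 * (12000 - 9000) / (12000 - 6000) = 1800s; at or beyond 12000
 * states the state is treated as already expired (time_second).
 */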
/*
 * (called with exclusive pf_token)
 */
int
pf_purge_expired_src_nodes(int waslocked)
{
	struct pf_src_node *cur, *next;
	int locked = waslocked;
	int cpu = mycpu->gd_cpuid;

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking[cpu]);
	     cur;
	     cur = next) {
		next = RB_NEXT(pf_src_tree, &tree_src_tracking[cpu], cur);

		if (cur->states <= 0 && cur->expire <= time_second) {
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				next = RB_NEXT(pf_src_tree,
					       &tree_src_tracking[cpu], cur);
				locked = 1;
			}
			if (cur->rule.ptr != NULL) {
				cur->rule.ptr->src_nodes--;
				if (cur->rule.ptr->states_cur <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0)
					pf_rm_rule(NULL, cur->rule.ptr);
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], cur);
			pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			atomic_add_int(&pf_status.src_nodes, -1);
			kfree(cur, M_PFSRCTREEPL);
		}
	}
	if (locked && !waslocked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}

void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			--s->src_node->conn;
		if (--s->src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_second + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_second + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/* callers should be at crit_enter() */
void
pf_unlink_state(struct pf_state *cur)
{
	int cpu = mycpu->gd_cpuid;

	if (cur->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
		    &cur->key[PF_SK_WIRE]->addr[1],
		    &cur->key[PF_SK_WIRE]->addr[0],
		    cur->key[PF_SK_WIRE]->port[1],
		    cur->key[PF_SK_WIRE]->port[0],
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}
	RB_REMOVE(pf_state_tree_id, &tree_id[cpu], cur);
	if (cur->creatorid == pf_status.hostid)
		pfsync_delete_state(cur);
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur);
}

static struct pf_state	*purge_cur[MAXCPU];

/*
 * callers should be at crit_enter() and hold pf_consistency_lock exclusively.
 * pf_token must also be held exclusively.
 */
void
pf_free_state(struct pf_state *cur)
{
	int cpu = mycpu->gd_cpuid;

	KKASSERT(cur->cpuid == cpu);

	if (pfsyncif != NULL &&
	    (pfsyncif->sc_bulk_send_next == cur ||
	     pfsyncif->sc_bulk_terminator == cur))
		return;
	KKASSERT(cur->timeout == PFTM_UNLINKED);
	if (--cur->rule.ptr->states_cur <= 0 &&
	    cur->rule.ptr->src_nodes <= 0)
		pf_rm_rule(NULL, cur->rule.ptr);
	if (cur->nat_rule.ptr != NULL)
		if (--cur->nat_rule.ptr->states_cur <= 0 &&
		    cur->nat_rule.ptr->src_nodes <= 0)
			pf_rm_rule(NULL, cur->nat_rule.ptr);
	if (cur->anchor.ptr != NULL)
		if (--cur->anchor.ptr->states_cur <= 0)
			pf_rm_rule(NULL, cur->anchor.ptr);
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);

	/*
	 * We may be freeing pf_purge_expired_states()'s saved scan entry,
	 * adjust it if necessary.
	 */
	if (purge_cur[cpu] == cur) {
		kprintf("PURGE CONFLICT\n");
		purge_cur[cpu] = TAILQ_NEXT(purge_cur[cpu], entry_list);
	}
	TAILQ_REMOVE(&state_list[cpu], cur, entry_list);
	if (cur->tag)
		pf_tag_unref(cur->tag);
	kfree(cur, M_PFSTATEPL);
	pf_status.fcounters[FCNT_STATE_REMOVALS]++;
	atomic_add_int(&pf_status.states, -1);
}

int
pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
{
	struct pf_state		*cur;
	int locked = waslocked;
	int cpu = mycpu->gd_cpuid;

	while (maxcheck--) {
		/*
		 * Wrap to start of list when we hit the end
		 */
		cur = purge_cur[cpu];
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list[cpu]);
			if (cur == NULL)
				break;	/* list empty */
		}

		/*
		 * Setup next (purge_cur) while we process this one.  If
		 * we block and something else deletes purge_cur,
		 * pf_free_state() will adjust it further ahead.
		 */
		purge_cur[cpu] = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			/* free unlinked state */
			if (!locked) {
				lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
				locked = 1;
			}
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= time_second) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			if (!locked) {
				if (!lockmgr(&pf_consistency_lock,
					     LK_EXCLUSIVE))
					return (0);
				locked = 1;
			}
			pf_free_state(cur);
		}
	}

	if (locked)
		lockmgr(&pf_consistency_lock, LK_RELEASE);
	return (1);
}

int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE)
		return (0);
	if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
		return (1);
	return (0);
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
	if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
		return;
	pfr_detach_table(aw->p.tbl);
	aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt = aw->p.tbl;

	if (aw->type != PF_ADDR_TABLE || kt == NULL)
		return;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
		kt->pfrkt_cnt : -1;
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			kprintf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart = 255, curend = 0,
		    maxstart = 0, maxend = 0;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				else
					curend = i;
			} else {
				if (curstart) {
					if ((curend - curstart) >
					    (maxend - maxstart)) {
						maxstart = curstart;
						maxend = curend;
						curstart = 255;
					}
				}
			}
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (maxend != 7) {
					if (i == maxstart)
						kprintf(":");
				} else {
					if (i == maxend)
						kprintf(":");
				}
			} else {
				b = ntohs(addr->addr16[i]);
				kprintf("%x", b);
				if (i < 7)
					kprintf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			kprintf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}

void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_TCP:
		kprintf("TCP ");
		break;
	case IPPROTO_UDP:
		kprintf("UDP ");
		break;
	case IPPROTO_ICMP:
		kprintf("ICMP ");
		break;
	case IPPROTO_ICMPV6:
		kprintf("ICMPV6 ");
		break;
	default:
		kprintf("%u ", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		kprintf(" in");
		break;
	case PF_OUT:
		kprintf(" out");
		break;
	}
	if (skw) {
		kprintf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		kprintf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		kprintf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			kprintf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			kprintf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			kprintf("]");
			kprintf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				kprintf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			kprintf("]");
		}
		kprintf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		kprintf(" ");
	if (f & TH_FIN)
		kprintf("F");
	if (f & TH_SYN)
		kprintf("S");
	if (f & TH_RST)
		kprintf("R");
	if (f & TH_PUSH)
		kprintf("P");
	if (f & TH_ACK)
		kprintf("A");
	if (f & TH_URG)
		kprintf("U");
	if (f & TH_ECE)
		kprintf("E");
	if (f & TH_CWR)
		kprintf("W");
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	case PF_ADDR_RTLABEL:
		return (aw1->v.rtlabel != aw2->v.rtlabel);
	default:
		kprintf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
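/*
 * pf_cksum_fixup() above performs an incremental Internet-checksum
 * update in the style of RFC 1624: for each 16-bit word that changes
 * from 'old' to 'new', the folded sum is adjusted by (old - new) and
 * the carry is folded back in, avoiding a full recomputation.  The
 * 'udp' flag preserves UDP's special encoding, where a checksum of 0
 * means "no checksum" and a computed 0 must be transmitted as 0xffff.
 */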
void
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*p = pn;
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u),
		    po, pn, u);
		break;
#endif /* INET6 */
	}
}

/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

#ifdef INET6
void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */

void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	if (oa)
		PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc = 0;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t	oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
	if (oa) {
		PF_ACPY(oa, na, af);
		switch (af) {
#ifdef INET
		case AF_INET:
			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
			    ooa.addr16[0], oa->addr16[0], 0),
			    ooa.addr16[1], oa->addr16[1], 0);
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ooa.addr16[0], oa->addr16[0], u),
			    ooa.addr16[1], oa->addr16[1], u),
			    ooa.addr16[2], oa->addr16[2], u),
			    ooa.addr16[3], oa->addr16[3], u),
			    ooa.addr16[4], oa->addr16[4], u),
			    ooa.addr16[5], oa->addr16[5], u),
			    ooa.addr16[6], oa->addr16[6], u),
			    ooa.addr16[7], oa->addr16[7], u);
			break;
#endif /* INET6 */
		}
	}
}
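/*
 * Checksum bookkeeping in pf_change_icmp() above: 'pc' is the checksum
 * of the protocol header quoted inside the ICMP payload, 'h2c' the
 * quoted (inner) IP header checksum, 'ic' the checksum of the ICMP
 * header carrying the quote, and 'hc' the outer IP header checksum.
 * Every rewrite of the quoted packet must also be reflected in the
 * enclosing ICMP checksum, which is why *ic is fixed up with the
 * old/new values of both the inner port/checksum and the inner
 * addresses.
 */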
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct raw_sackblock sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return (0);

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(&sack.rblk_start,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_start) -
						dst->seqdiff), 0);
					pf_change_a(&sack.rblk_end,
					    &th->th_sum,
					    htonl(ntohl(sack.rblk_end) -
						dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(m, off + sizeof(*th), thoptlen, opts);
	return (copyback);
}
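/*
 * Why the SACK blocks must be rewritten: when a state modulates TCP
 * sequence numbers (seqdiff != 0), the peer's SACK options still refer
 * to the modulated sequence space.  Each raw_sackblock start/end above
 * is shifted back by dst->seqdiff via pf_change_a(), which also fixes
 * the TCP checksum incrementally.  E.g. a hypothetical SACK block
 * [1000, 2000) under a seqdiff of 100 is rewritten to [900, 1900).
 */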
void
pf_send_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
{
	struct mbuf	*m;
	int		 len = 0, tlen;
#ifdef INET
	struct ip	*h = NULL;
#endif /* INET */
#ifdef INET6
	struct ip6_hdr	*h6 = NULL;
#endif /* INET6 */
	struct tcphdr	*th = NULL;
	char		*opt;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
#ifdef INET
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	}

	/*
	 * Create outgoing mbuf.
	 *
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	m = m_gethdr(MB_DONTWAIT, MT_HEADER);
	if (m == NULL) {
		return;
	}
	if (tag)
		m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m->m_pkthdr.pf.flags = 0;
	m->m_pkthdr.pf.tag = rtag;
	/* XXX Recheck when upgrading to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;
	if (r != NULL && r->rtableid >= 0)
		m->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r != NULL && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = af;
		m->m_pkthdr.pf.hdr = mtod(m, struct ip *);
	}
#endif /* ALTQ */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
#ifdef INET
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = tlen;
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		mss = htons(mss);
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
#ifdef INET
	case AF_INET:
		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_len = len;
		h->ip_off = path_mtu_discovery ? IP_DF : 0;
		h->ip_ttl = ttl ? ttl : ip_defttl;
		h->ip_sum = 0;
		if (eh == NULL) {
			lwkt_reltoken(&pf_token);
			ip_output(m, NULL, NULL, 0, NULL, NULL);
			lwkt_gettoken(&pf_token);
		} else {
			struct route		 ro;
			struct rtentry		 rt;
			struct ether_header	*e = (void *)ro.ro_dst.sa_data;

			if (ifp == NULL) {
				m_freem(m);
				return;
			}
			rt.rt_ifp = ifp;
			ro.ro_rt = &rt;
			ro.ro_dst.sa_len = sizeof(ro.ro_dst);
			ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
			bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
			bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
			e->ether_type = eh->ether_type;
			/* XXX_IMPORT: later */
			lwkt_reltoken(&pf_token);
			ip_output(m, NULL, &ro, 0, NULL, NULL);
			lwkt_gettoken(&pf_token);
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		lwkt_reltoken(&pf_token);
		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
		lwkt_gettoken(&pf_token);
		break;
#endif /* INET6 */
	}
}

void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct mbuf	*m0;

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
		return;

	m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
	m0->m_pkthdr.pf.flags = 0;
	/* XXX Recheck when upgrading to > 4.4 */
	m0->m_pkthdr.pf.statekey = NULL;

	if (r->rtableid >= 0)
		m0->m_pkthdr.pf.rtableid = r->rtableid;

#ifdef ALTQ
	if (r->qid) {
		/* set the queueing flags on the copy we actually send */
		m0->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		m0->m_pkthdr.pf.qid = r->qid;
		m0->m_pkthdr.pf.ecn_af = af;
		m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
	}
#endif /* ALTQ */

	switch (af) {
#ifdef INET
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
#endif /* INET6 */
	}
}

/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal.  If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
#ifdef INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		     (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		     (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		     (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		     (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}
2074 */ 2075 int 2076 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, 2077 struct pf_addr *a, sa_family_t af) 2078 { 2079 switch (af) { 2080 #ifdef INET 2081 case AF_INET: 2082 if ((a->addr32[0] < b->addr32[0]) || 2083 (a->addr32[0] > e->addr32[0])) 2084 return (0); 2085 break; 2086 #endif /* INET */ 2087 #ifdef INET6 2088 case AF_INET6: { 2089 int i; 2090 2091 /* check a >= b */ 2092 for (i = 0; i < 4; ++i) 2093 if (a->addr32[i] > b->addr32[i]) 2094 break; 2095 else if (a->addr32[i] < b->addr32[i]) 2096 return (0); 2097 /* check a <= e */ 2098 for (i = 0; i < 4; ++i) 2099 if (a->addr32[i] < e->addr32[i]) 2100 break; 2101 else if (a->addr32[i] > e->addr32[i]) 2102 return (0); 2103 break; 2104 } 2105 #endif /* INET6 */ 2106 } 2107 return (1); 2108 } 2109 2110 int 2111 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) 2112 { 2113 switch (op) { 2114 case PF_OP_IRG: 2115 return ((p > a1) && (p < a2)); 2116 case PF_OP_XRG: 2117 return ((p < a1) || (p > a2)); 2118 case PF_OP_RRG: 2119 return ((p >= a1) && (p <= a2)); 2120 case PF_OP_EQ: 2121 return (p == a1); 2122 case PF_OP_NE: 2123 return (p != a1); 2124 case PF_OP_LT: 2125 return (p < a1); 2126 case PF_OP_LE: 2127 return (p <= a1); 2128 case PF_OP_GT: 2129 return (p > a1); 2130 case PF_OP_GE: 2131 return (p >= a1); 2132 } 2133 return (0); /* never reached */ 2134 } 2135 2136 int 2137 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) 2138 { 2139 a1 = ntohs(a1); 2140 a2 = ntohs(a2); 2141 p = ntohs(p); 2142 return (pf_match(op, a1, a2, p)); 2143 } 2144 2145 int 2146 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) 2147 { 2148 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2149 return (0); 2150 return (pf_match(op, a1, a2, u)); 2151 } 2152 2153 int 2154 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) 2155 { 2156 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2157 return (0); 2158 return (pf_match(op, a1, a2, g)); 2159 } 2160 2161 int 2162 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag) 2163 { 2164 if (*tag == -1) 2165 *tag = m->m_pkthdr.pf.tag; 2166 2167 return ((!r->match_tag_not && r->match_tag == *tag) || 2168 (r->match_tag_not && r->match_tag != *tag)); 2169 } 2170 2171 int 2172 pf_tag_packet(struct mbuf *m, int tag, int rtableid) 2173 { 2174 if (tag <= 0 && rtableid < 0) 2175 return (0); 2176 2177 if (tag > 0) 2178 m->m_pkthdr.pf.tag = tag; 2179 if (rtableid >= 0) 2180 m->m_pkthdr.pf.rtableid = rtableid; 2181 2182 return (0); 2183 } 2184 2185 void 2186 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n, 2187 struct pf_rule **r, struct pf_rule **a, int *match) 2188 { 2189 struct pf_anchor_stackframe *f; 2190 2191 (*r)->anchor->match = 0; 2192 if (match) 2193 *match = 0; 2194 if (*depth >= NELEM(pf_anchor_stack)) { 2195 kprintf("pf_step_into_anchor: stack overflow\n"); 2196 *r = TAILQ_NEXT(*r, entries); 2197 return; 2198 } else if (*depth == 0 && a != NULL) 2199 *a = *r; 2200 f = pf_anchor_stack + (*depth)++; 2201 f->rs = *rs; 2202 f->r = *r; 2203 if ((*r)->anchor_wildcard) { 2204 f->parent = &(*r)->anchor->children; 2205 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) == 2206 NULL) { 2207 *r = NULL; 2208 return; 2209 } 2210 *rs = &f->child->ruleset; 2211 } else { 2212 f->parent = NULL; 2213 f->child = NULL; 2214 *rs = &(*r)->anchor->ruleset; 2215 } 2216 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2217 } 2218 2219 int 2220 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n, 2221 struct pf_rule **r, struct pf_rule **a, int *match) 2222 { 
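	/*
	 * Summary comment (added for clarity; behaviour is defined by the
	 * code below): unwind the anchor stack built by
	 * pf_step_into_anchor().  For a wildcard anchor, step to the next
	 * child ruleset before popping the frame; when a child matched,
	 * propagate the match to the parent rule and honour that rule's
	 * quick flag via the return value.
	 */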
2223 struct pf_anchor_stackframe *f; 2224 int quick = 0; 2225 2226 do { 2227 if (*depth <= 0) 2228 break; 2229 f = pf_anchor_stack + *depth - 1; 2230 if (f->parent != NULL && f->child != NULL) { 2231 if (f->child->match || 2232 (match != NULL && *match)) { 2233 f->r->anchor->match = 1; 2234 *match = 0; 2235 } 2236 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child); 2237 if (f->child != NULL) { 2238 *rs = &f->child->ruleset; 2239 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2240 if (*r == NULL) 2241 continue; 2242 else 2243 break; 2244 } 2245 } 2246 (*depth)--; 2247 if (*depth == 0 && a != NULL) 2248 *a = NULL; 2249 *rs = f->rs; 2250 if (f->r->anchor->match || (match != NULL && *match)) 2251 quick = f->r->quick; 2252 *r = TAILQ_NEXT(f->r, entries); 2253 } while (*r == NULL); 2254 2255 return (quick); 2256 } 2257 2258 #ifdef INET6 2259 void 2260 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, 2261 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) 2262 { 2263 switch (af) { 2264 #ifdef INET 2265 case AF_INET: 2266 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2267 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2268 break; 2269 #endif /* INET */ 2270 case AF_INET6: 2271 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2272 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2273 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | 2274 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); 2275 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | 2276 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); 2277 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | 2278 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); 2279 break; 2280 } 2281 } 2282 2283 void 2284 pf_addr_inc(struct pf_addr *addr, sa_family_t af) 2285 { 2286 switch (af) { 2287 #ifdef INET 2288 case AF_INET: 2289 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); 2290 break; 2291 #endif /* INET */ 2292 case AF_INET6: 2293 if (addr->addr32[3] == 0xffffffff) { 2294 addr->addr32[3] = 0; 2295 if (addr->addr32[2] == 0xffffffff) { 2296 addr->addr32[2] = 0; 2297 if (addr->addr32[1] == 0xffffffff) { 2298 addr->addr32[1] = 0; 2299 addr->addr32[0] = 2300 htonl(ntohl(addr->addr32[0]) + 1); 2301 } else 2302 addr->addr32[1] = 2303 htonl(ntohl(addr->addr32[1]) + 1); 2304 } else 2305 addr->addr32[2] = 2306 htonl(ntohl(addr->addr32[2]) + 1); 2307 } else 2308 addr->addr32[3] = 2309 htonl(ntohl(addr->addr32[3]) + 1); 2310 break; 2311 } 2312 } 2313 #endif /* INET6 */ 2314 2315 #define mix(a,b,c) \ 2316 do { \ 2317 a -= b; a -= c; a ^= (c >> 13); \ 2318 b -= c; b -= a; b ^= (a << 8); \ 2319 c -= a; c -= b; c ^= (b >> 13); \ 2320 a -= b; a -= c; a ^= (c >> 12); \ 2321 b -= c; b -= a; b ^= (a << 16); \ 2322 c -= a; c -= b; c ^= (b >> 5); \ 2323 a -= b; a -= c; a ^= (c >> 3); \ 2324 b -= c; b -= a; b ^= (a << 10); \ 2325 c -= a; c -= b; c ^= (b >> 15); \ 2326 } while (0) 2327 2328 /* 2329 * hash function based on bridge_hash in if_bridge.c 2330 */ 2331 void 2332 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash, 2333 struct pf_poolhashkey *key, sa_family_t af) 2334 { 2335 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0]; 2336 2337 switch (af) { 2338 #ifdef INET 2339 case AF_INET: 2340 a += inaddr->addr32[0]; 2341 b += key->key32[1]; 2342 mix(a, b, c); 2343 hash->addr32[0] = c + key->key32[2]; 2344 break; 2345 #endif /* INET */ 2346 #ifdef INET6 2347 case AF_INET6: 2348 a += inaddr->addr32[0]; 2349 b += inaddr->addr32[2]; 2350 mix(a, b, c); 2351 
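		/* (comment added) each mix() round folds two address words
		 * and a key word into c; each of the four stores below
		 * emits one 32-bit word of the 128-bit IPv6 hash */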
hash->addr32[0] = c; 2352 a += inaddr->addr32[1]; 2353 b += inaddr->addr32[3]; 2354 c += key->key32[1]; 2355 mix(a, b, c); 2356 hash->addr32[1] = c; 2357 a += inaddr->addr32[2]; 2358 b += inaddr->addr32[1]; 2359 c += key->key32[2]; 2360 mix(a, b, c); 2361 hash->addr32[2] = c; 2362 a += inaddr->addr32[3]; 2363 b += inaddr->addr32[0]; 2364 c += key->key32[3]; 2365 mix(a, b, c); 2366 hash->addr32[3] = c; 2367 break; 2368 #endif /* INET6 */ 2369 } 2370 } 2371 2372 int 2373 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, 2374 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn) 2375 { 2376 unsigned char hash[16]; 2377 struct pf_pool *rpool = &r->rpool; 2378 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr; 2379 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask; 2380 struct pf_pooladdr *acur = rpool->cur; 2381 struct pf_src_node k; 2382 int cpu = mycpu->gd_cpuid; 2383 2384 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR && 2385 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2386 k.af = af; 2387 PF_ACPY(&k.addr, saddr, af); 2388 if (r->rule_flag & PFRULE_RULESRCTRACK || 2389 r->rpool.opts & PF_POOL_STICKYADDR) 2390 k.rule.ptr = r; 2391 else 2392 k.rule.ptr = NULL; 2393 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; 2394 *sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k); 2395 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) { 2396 PF_ACPY(naddr, &(*sn)->raddr, af); 2397 if (pf_status.debug >= PF_DEBUG_MISC) { 2398 kprintf("pf_map_addr: src tracking maps "); 2399 pf_print_host(&k.addr, 0, af); 2400 kprintf(" to "); 2401 pf_print_host(naddr, 0, af); 2402 kprintf("\n"); 2403 } 2404 return (0); 2405 } 2406 } 2407 2408 if (rpool->cur->addr.type == PF_ADDR_NOROUTE) 2409 return (1); 2410 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2411 switch (af) { 2412 #ifdef INET 2413 case AF_INET: 2414 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 && 2415 (rpool->opts & PF_POOL_TYPEMASK) != 2416 PF_POOL_ROUNDROBIN) 2417 return (1); 2418 raddr = &rpool->cur->addr.p.dyn->pfid_addr4; 2419 rmask = &rpool->cur->addr.p.dyn->pfid_mask4; 2420 break; 2421 #endif /* INET */ 2422 #ifdef INET6 2423 case AF_INET6: 2424 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 && 2425 (rpool->opts & PF_POOL_TYPEMASK) != 2426 PF_POOL_ROUNDROBIN) 2427 return (1); 2428 raddr = &rpool->cur->addr.p.dyn->pfid_addr6; 2429 rmask = &rpool->cur->addr.p.dyn->pfid_mask6; 2430 break; 2431 #endif /* INET6 */ 2432 } 2433 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2434 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) 2435 return (1); /* unsupported */ 2436 } else { 2437 raddr = &rpool->cur->addr.v.a.addr; 2438 rmask = &rpool->cur->addr.v.a.mask; 2439 } 2440 2441 switch (rpool->opts & PF_POOL_TYPEMASK) { 2442 case PF_POOL_NONE: 2443 PF_ACPY(naddr, raddr, af); 2444 break; 2445 case PF_POOL_BITMASK: 2446 PF_POOLMASK(naddr, raddr, rmask, saddr, af); 2447 break; 2448 case PF_POOL_RANDOM: 2449 if (init_addr != NULL && PF_AZERO(init_addr, af)) { 2450 switch (af) { 2451 #ifdef INET 2452 case AF_INET: 2453 rpool->counter.addr32[0] = htonl(karc4random()); 2454 break; 2455 #endif /* INET */ 2456 #ifdef INET6 2457 case AF_INET6: 2458 if (rmask->addr32[3] != 0xffffffff) 2459 rpool->counter.addr32[3] = 2460 htonl(karc4random()); 2461 else 2462 break; 2463 if (rmask->addr32[2] != 0xffffffff) 2464 rpool->counter.addr32[2] = 2465 htonl(karc4random()); 2466 else 2467 break; 2468 if (rmask->addr32[1] != 0xffffffff) 2469 rpool->counter.addr32[1] = 2470 htonl(karc4random()); 2471 else 2472 break; 
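				/* (comment added) the walk above randomizes
				 * host bits from the least significant word
				 * up and stops at the first fully-masked
				 * word; this final check covers the top
				 * word, leaving network bits untouched */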
2473 if (rmask->addr32[0] != 0xffffffff) 2474 rpool->counter.addr32[0] = 2475 htonl(karc4random()); 2476 break; 2477 #endif /* INET6 */ 2478 } 2479 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af); 2480 PF_ACPY(init_addr, naddr, af); 2481 2482 } else { 2483 PF_AINC(&rpool->counter, af); 2484 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af); 2485 } 2486 break; 2487 case PF_POOL_SRCHASH: 2488 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af); 2489 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af); 2490 break; 2491 case PF_POOL_ROUNDROBIN: 2492 if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2493 if (!pfr_pool_get(rpool->cur->addr.p.tbl, 2494 &rpool->tblidx, &rpool->counter, 2495 &raddr, &rmask, af)) 2496 goto get_addr; 2497 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2498 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, 2499 &rpool->tblidx, &rpool->counter, 2500 &raddr, &rmask, af)) 2501 goto get_addr; 2502 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af)) 2503 goto get_addr; 2504 2505 try_next: 2506 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL) 2507 rpool->cur = TAILQ_FIRST(&rpool->list); 2508 if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2509 rpool->tblidx = -1; 2510 if (pfr_pool_get(rpool->cur->addr.p.tbl, 2511 &rpool->tblidx, &rpool->counter, 2512 &raddr, &rmask, af)) { 2513 /* table contains no address of type 'af' */ 2514 if (rpool->cur != acur) 2515 goto try_next; 2516 return (1); 2517 } 2518 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2519 rpool->tblidx = -1; 2520 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, 2521 &rpool->tblidx, &rpool->counter, 2522 &raddr, &rmask, af)) { 2523 /* table contains no address of type 'af' */ 2524 if (rpool->cur != acur) 2525 goto try_next; 2526 return (1); 2527 } 2528 } else { 2529 raddr = &rpool->cur->addr.v.a.addr; 2530 rmask = &rpool->cur->addr.v.a.mask; 2531 PF_ACPY(&rpool->counter, raddr, af); 2532 } 2533 2534 get_addr: 2535 PF_ACPY(naddr, &rpool->counter, af); 2536 if (init_addr != NULL && PF_AZERO(init_addr, af)) 2537 PF_ACPY(init_addr, naddr, af); 2538 PF_AINC(&rpool->counter, af); 2539 break; 2540 } 2541 if (*sn != NULL) 2542 PF_ACPY(&(*sn)->raddr, naddr, af); 2543 2544 if (pf_status.debug >= PF_DEBUG_MISC && 2545 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2546 kprintf("pf_map_addr: selected address "); 2547 pf_print_host(naddr, 0, af); 2548 kprintf("\n"); 2549 } 2550 2551 return (0); 2552 } 2553 2554 int 2555 pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r, 2556 struct pf_addr *saddr, struct pf_addr *daddr, 2557 u_int16_t sport, u_int16_t dport, 2558 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high, 2559 struct pf_src_node **sn) 2560 { 2561 struct pf_state_key_cmp key; 2562 struct pf_addr init_addr; 2563 u_int16_t cut; 2564 u_int32_t toeplitz_sport; 2565 2566 bzero(&init_addr, sizeof(init_addr)); 2567 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2568 return (1); 2569 2570 if (proto == IPPROTO_ICMP) { 2571 low = 1; 2572 high = 65535; 2573 } 2574 2575 bzero(&key, sizeof(key)); 2576 key.af = af; 2577 key.proto = proto; 2578 key.port[0] = dport; 2579 PF_ACPY(&key.addr[0], daddr, key.af); 2580 2581 do { 2582 PF_ACPY(&key.addr[1], naddr, key.af); 2583 2584 /* 2585 * We want to select a port that calculates to a toeplitz hash 2586 * that masks to the same cpu, otherwise the response may 2587 * not see the new state. 
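	 *
	 * Illustrative sketch (added; assumes the dispatch cpu is derived
	 * from the Toeplitz hash masked with ncpus2_mask):
	 *
	 *	contrib = toeplitz_piecemeal_port(sport) ^
	 *	    toeplitz_piecemeal_addr(saddr) ^
	 *	    toeplitz_piecemeal_addr(naddr);
	 *	accept a candidate nport only if
	 *	    ((toeplitz_piecemeal_port(nport) ^ contrib) &
	 *	    ncpus2_mask) == 0
	 *
	 * i.e. the hash contributions combine by XOR, so candidate source
	 * ports can be tested cheaply without recomputing the full hash.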
2588 */ 2589 switch(af) { 2590 case AF_INET: 2591 toeplitz_sport = 2592 toeplitz_piecemeal_port(sport) ^ 2593 toeplitz_piecemeal_addr(saddr->v4.s_addr) ^ 2594 toeplitz_piecemeal_addr(naddr->v4.s_addr); 2595 break; 2596 case AF_INET6: 2597 /* XXX TODO XXX */ 2598 default: 2599 /* XXX TODO XXX */ 2600 toeplitz_sport = 0; 2601 break; 2602 } 2603 2604 /* 2605 * port search; start random, step; 2606 * similar 2 portloop in in_pcbbind 2607 * 2608 * XXX fixed ports present a problem for cpu localization. 2609 */ 2610 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP || 2611 proto == IPPROTO_ICMP)) { 2612 key.port[1] = sport; 2613 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2614 *nport = sport; 2615 return (0); 2616 } 2617 } else if (low == 0 && high == 0) { 2618 key.port[1] = sport; 2619 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2620 *nport = sport; 2621 return (0); 2622 } 2623 } else if (low == high) { 2624 key.port[1] = htons(low); 2625 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2626 *nport = htons(low); 2627 return (0); 2628 } 2629 } else { 2630 u_int16_t tmp; 2631 2632 if (low > high) { 2633 tmp = low; 2634 low = high; 2635 high = tmp; 2636 } 2637 /* low < high */ 2638 cut = htonl(karc4random()) % (1 + high - low) + low; 2639 /* low <= cut <= high */ 2640 for (tmp = cut; tmp <= high; ++(tmp)) { 2641 key.port[1] = htons(tmp); 2642 if ((toeplitz_piecemeal_port(key.port[1]) ^ 2643 toeplitz_sport) & ncpus2_mask) { 2644 continue; 2645 } 2646 if (pf_find_state_all(&key, PF_IN, NULL) == 2647 NULL && !in_baddynamic(tmp, proto)) { 2648 *nport = htons(tmp); 2649 return (0); 2650 } 2651 } 2652 for (tmp = cut - 1; tmp >= low; --(tmp)) { 2653 key.port[1] = htons(tmp); 2654 if ((toeplitz_piecemeal_port(key.port[1]) ^ 2655 toeplitz_sport) & ncpus2_mask) { 2656 continue; 2657 } 2658 if (pf_find_state_all(&key, PF_IN, NULL) == 2659 NULL && !in_baddynamic(tmp, proto)) { 2660 *nport = htons(tmp); 2661 return (0); 2662 } 2663 } 2664 } 2665 2666 /* 2667 * Next address 2668 */ 2669 switch (r->rpool.opts & PF_POOL_TYPEMASK) { 2670 case PF_POOL_RANDOM: 2671 case PF_POOL_ROUNDROBIN: 2672 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2673 return (1); 2674 break; 2675 case PF_POOL_NONE: 2676 case PF_POOL_SRCHASH: 2677 case PF_POOL_BITMASK: 2678 default: 2679 return (1); 2680 } 2681 } while (! 
direction, kif, saddr, 2785 sport, daddr, dport, PF_RULESET_RDR); 2786 if (r == NULL) 2787 r = pf_match_translation(pd, m, off, direction, kif, 2788 saddr, sport, daddr, dport, PF_RULESET_BINAT); 2789 } 2790 2791 if (r != NULL) { 2792 struct pf_addr *naddr; 2793 u_int16_t *nport; 2794 2795 if (pf_state_key_setup(pd, r, skw, sks, skp, nkp, 2796 saddr, daddr, sport, dport)) 2797 return r; 2798 2799 /* XXX We only modify one side for now. */ 2800 naddr = &(*nkp)->addr[1]; 2801 nport = &(*nkp)->port[1]; 2802 2803 /* 2804 * NOTE: Currently all translations will clear 2805 * BRIDGE_MBUF_TAGGED, telling the bridge to 2806 * ignore the original input encapsulation. 2807 */ 2808 switch (r->action) { 2809 case PF_NONAT: 2810 case PF_NOBINAT: 2811 case PF_NORDR: 2812 return (NULL); 2813 case PF_NAT: 2814 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2815 if (pf_get_sport(pd->af, pd->proto, r, 2816 saddr, daddr, sport, dport, 2817 naddr, nport, r->rpool.proxy_port[0], 2818 r->rpool.proxy_port[1], sn)) { 2819 DPFPRINTF(PF_DEBUG_MISC, 2820 ("pf: NAT proxy port allocation " 2821 "(%u-%u) failed\n", 2822 r->rpool.proxy_port[0], 2823 r->rpool.proxy_port[1])); 2824 return (NULL); 2825 } 2826 break; 2827 case PF_BINAT: 2828 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2829 switch (direction) { 2830 case PF_OUT: 2831 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){ 2832 switch (pd->af) { 2833 #ifdef INET 2834 case AF_INET: 2835 if (r->rpool.cur->addr.p.dyn-> 2836 pfid_acnt4 < 1) 2837 return (NULL); 2838 PF_POOLMASK(naddr, 2839 &r->rpool.cur->addr.p.dyn-> 2840 pfid_addr4, 2841 &r->rpool.cur->addr.p.dyn-> 2842 pfid_mask4, 2843 saddr, AF_INET); 2844 break; 2845 #endif /* INET */ 2846 #ifdef INET6 2847 case AF_INET6: 2848 if (r->rpool.cur->addr.p.dyn-> 2849 pfid_acnt6 < 1) 2850 return (NULL); 2851 PF_POOLMASK(naddr, 2852 &r->rpool.cur->addr.p.dyn-> 2853 pfid_addr6, 2854 &r->rpool.cur->addr.p.dyn-> 2855 pfid_mask6, 2856 saddr, AF_INET6); 2857 break; 2858 #endif /* INET6 */ 2859 } 2860 } else 2861 PF_POOLMASK(naddr, 2862 &r->rpool.cur->addr.v.a.addr, 2863 &r->rpool.cur->addr.v.a.mask, 2864 saddr, pd->af); 2865 break; 2866 case PF_IN: 2867 if (r->src.addr.type == PF_ADDR_DYNIFTL) { 2868 switch (pd->af) { 2869 #ifdef INET 2870 case AF_INET: 2871 if (r->src.addr.p.dyn-> 2872 pfid_acnt4 < 1) 2873 return (NULL); 2874 PF_POOLMASK(naddr, 2875 &r->src.addr.p.dyn-> 2876 pfid_addr4, 2877 &r->src.addr.p.dyn-> 2878 pfid_mask4, 2879 daddr, AF_INET); 2880 break; 2881 #endif /* INET */ 2882 #ifdef INET6 2883 case AF_INET6: 2884 if (r->src.addr.p.dyn-> 2885 pfid_acnt6 < 1) 2886 return (NULL); 2887 PF_POOLMASK(naddr, 2888 &r->src.addr.p.dyn-> 2889 pfid_addr6, 2890 &r->src.addr.p.dyn-> 2891 pfid_mask6, 2892 daddr, AF_INET6); 2893 break; 2894 #endif /* INET6 */ 2895 } 2896 } else 2897 PF_POOLMASK(naddr, 2898 &r->src.addr.v.a.addr, 2899 &r->src.addr.v.a.mask, daddr, 2900 pd->af); 2901 break; 2902 } 2903 break; 2904 case PF_RDR: { 2905 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2906 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn)) 2907 return (NULL); 2908 if ((r->rpool.opts & PF_POOL_TYPEMASK) == 2909 PF_POOL_BITMASK) 2910 PF_POOLMASK(naddr, naddr, 2911 &r->rpool.cur->addr.v.a.mask, daddr, 2912 pd->af); 2913 2914 if (r->rpool.proxy_port[1]) { 2915 u_int32_t tmp_nport; 2916 2917 tmp_nport = ((ntohs(dport) - 2918 ntohs(r->dst.port[0])) % 2919 (r->rpool.proxy_port[1] - 2920 r->rpool.proxy_port[0] + 1)) + 2921 r->rpool.proxy_port[0]; 2922 2923 /* wrap around if necessary */ 2924 if (tmp_nport > 65535) 2925 tmp_nport -= 65535; 2926 
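				/*
				 * Worked example (added for illustration):
				 * for a rule redirecting destination ports
				 * 8000:8003 to proxy ports 9000:9001, an
				 * incoming dport of 8002 gives
				 * ((8002 - 8000) % 2) + 9000 = 9000; the
				 * destination port is folded onto the proxy
				 * range modulo its size.
				 */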
*nport = htons((u_int16_t)tmp_nport); 2927 } else if (r->rpool.proxy_port[0]) 2928 *nport = htons(r->rpool.proxy_port[0]); 2929 break; 2930 } 2931 default: 2932 return (NULL); 2933 } 2934 } 2935 2936 return (r); 2937 } 2938 2939 struct netmsg_hashlookup { 2940 struct netmsg_base base; 2941 struct inpcb **nm_pinp; 2942 struct inpcbinfo *nm_pcbinfo; 2943 struct pf_addr *nm_saddr; 2944 struct pf_addr *nm_daddr; 2945 uint16_t nm_sport; 2946 uint16_t nm_dport; 2947 sa_family_t nm_af; 2948 }; 2949 2950 #ifdef PF_SOCKET_LOOKUP_DOMSG 2951 static void 2952 in_pcblookup_hash_handler(netmsg_t msg) 2953 { 2954 struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup *)msg; 2955 2956 if (rmsg->nm_af == AF_INET) 2957 *rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo, 2958 rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4, 2959 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 2960 #ifdef INET6 2961 else 2962 *rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo, 2963 &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6, 2964 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 2965 #endif /* INET6 */ 2966 lwkt_replymsg(&rmsg->base.lmsg, 0); 2967 } 2968 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 2969 2970 int 2971 pf_socket_lookup(int direction, struct pf_pdesc *pd) 2972 { 2973 struct pf_addr *saddr, *daddr; 2974 u_int16_t sport, dport; 2975 struct inpcbinfo *pi; 2976 struct inpcb *inp; 2977 struct netmsg_hashlookup *msg = NULL; 2978 #ifdef PF_SOCKET_LOOKUP_DOMSG 2979 struct netmsg_hashlookup msg0; 2980 #endif 2981 int pi_cpu = 0; 2982 2983 if (pd == NULL) 2984 return (-1); 2985 pd->lookup.uid = UID_MAX; 2986 pd->lookup.gid = GID_MAX; 2987 pd->lookup.pid = NO_PID; 2988 if (direction == PF_IN) { 2989 saddr = pd->src; 2990 daddr = pd->dst; 2991 } else { 2992 saddr = pd->dst; 2993 daddr = pd->src; 2994 } 2995 switch (pd->proto) { 2996 case IPPROTO_TCP: 2997 if (pd->hdr.tcp == NULL) 2998 return (-1); 2999 sport = pd->hdr.tcp->th_sport; 3000 dport = pd->hdr.tcp->th_dport; 3001 3002 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport); 3003 pi = &tcbinfo[pi_cpu]; 3004 /* 3005 * Our netstack runs lockless on MP systems 3006 * (only for TCP connections at the moment). 3007 * 3008 * As we are not allowed to read another CPU's tcbinfo, 3009 * we have to ask that CPU via remote call to search the 3010 * table for us. 3011 * 3012 * Prepare a msg iff data belongs to another CPU. 3013 */ 3014 if (pi_cpu != mycpu->gd_cpuid) { 3015 #ifdef PF_SOCKET_LOOKUP_DOMSG 3016 /* 3017 * NOTE: 3018 * 3019 * Following lwkt_domsg() is dangerous and could 3020 * lockup the network system, e.g. 3021 * 3022 * On 2 CPU system: 3023 * netisr0 domsg to netisr1 (due to lookup) 3024 * netisr1 domsg to netisr0 (due to lookup) 3025 * 3026 * We simply return -1 here, since we are probably 3027 * called before NAT, so the TCP packet should 3028 * already be on the correct CPU. 
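	 *
	 * (Note added) The message setup below is compiled only when
	 * PF_SOCKET_LOOKUP_DOMSG is defined; the default build instead
	 * takes the #else branch further down and returns -1 as
	 * described above.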
3029 */ 3030 msg = &msg0; 3031 netmsg_init(&msg->base, NULL, &curthread->td_msgport, 3032 0, in_pcblookup_hash_handler); 3033 msg->nm_pinp = &inp; 3034 msg->nm_pcbinfo = pi; 3035 msg->nm_saddr = saddr; 3036 msg->nm_sport = sport; 3037 msg->nm_daddr = daddr; 3038 msg->nm_dport = dport; 3039 msg->nm_af = pd->af; 3040 #else /* !PF_SOCKET_LOOKUP_DOMSG */ 3041 kprintf("pf_socket_lookup: tcp packet not on the " 3042 "correct cpu %d, cur cpu %d\n", 3043 pi_cpu, mycpuid); 3044 print_backtrace(-1); 3045 return -1; 3046 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 3047 } 3048 break; 3049 case IPPROTO_UDP: 3050 if (pd->hdr.udp == NULL) 3051 return (-1); 3052 sport = pd->hdr.udp->uh_sport; 3053 dport = pd->hdr.udp->uh_dport; 3054 pi = &udbinfo; 3055 break; 3056 default: 3057 return (-1); 3058 } 3059 if (direction != PF_IN) { 3060 u_int16_t p; 3061 3062 p = sport; 3063 sport = dport; 3064 dport = p; 3065 } 3066 switch (pd->af) { 3067 #ifdef INET6 3068 case AF_INET6: 3069 /* 3070 * Query other CPU, second part 3071 * 3072 * msg only gets initialized when: 3073 * 1) packet is TCP 3074 * 2) the info belongs to another CPU 3075 * 3076 * Use some switch/case magic to avoid code duplication. 3077 */ 3078 if (msg == NULL) { 3079 inp = in6_pcblookup_hash(pi, &saddr->v6, sport, 3080 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL); 3081 3082 if (inp == NULL) 3083 return (-1); 3084 break; 3085 } 3086 /* FALLTHROUGH if SMP and on other CPU */ 3087 #endif /* INET6 */ 3088 case AF_INET: 3089 if (msg != NULL) { 3090 lwkt_domsg(netisr_cpuport(pi_cpu), 3091 &msg->base.lmsg, 0); 3092 } else 3093 { 3094 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4, 3095 dport, INPLOOKUP_WILDCARD, NULL); 3096 } 3097 if (inp == NULL) 3098 return (-1); 3099 break; 3100 3101 default: 3102 return (-1); 3103 } 3104 pd->lookup.uid = inp->inp_socket->so_cred->cr_uid; 3105 pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0]; 3106 return (1); 3107 } 3108 3109 u_int8_t 3110 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3111 { 3112 int hlen; 3113 u_int8_t hdr[60]; 3114 u_int8_t *opt, optlen; 3115 u_int8_t wscale = 0; 3116 3117 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3118 if (hlen <= sizeof(struct tcphdr)) 3119 return (0); 3120 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3121 return (0); 3122 opt = hdr + sizeof(struct tcphdr); 3123 hlen -= sizeof(struct tcphdr); 3124 while (hlen >= 3) { 3125 switch (*opt) { 3126 case TCPOPT_EOL: 3127 case TCPOPT_NOP: 3128 ++opt; 3129 --hlen; 3130 break; 3131 case TCPOPT_WINDOW: 3132 wscale = opt[2]; 3133 if (wscale > TCP_MAX_WINSHIFT) 3134 wscale = TCP_MAX_WINSHIFT; 3135 wscale |= PF_WSCALE_FLAG; 3136 /* FALLTHROUGH */ 3137 default: 3138 optlen = opt[1]; 3139 if (optlen < 2) 3140 optlen = 2; 3141 hlen -= optlen; 3142 opt += optlen; 3143 break; 3144 } 3145 } 3146 return (wscale); 3147 } 3148 3149 u_int16_t 3150 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3151 { 3152 int hlen; 3153 u_int8_t hdr[60]; 3154 u_int8_t *opt, optlen; 3155 u_int16_t mss = tcp_mssdflt; 3156 3157 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3158 if (hlen <= sizeof(struct tcphdr)) 3159 return (0); 3160 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3161 return (0); 3162 opt = hdr + sizeof(struct tcphdr); 3163 hlen -= sizeof(struct tcphdr); 3164 while (hlen >= TCPOLEN_MAXSEG) { 3165 switch (*opt) { 3166 case TCPOPT_EOL: 3167 case TCPOPT_NOP: 3168 ++opt; 3169 --hlen; 3170 break; 3171 case TCPOPT_MAXSEG: 3172 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); 3173 /* FALLTHROUGH 
*/ 3174 default: 3175 optlen = opt[1]; 3176 if (optlen < 2) 3177 optlen = 2; 3178 hlen -= optlen; 3179 opt += optlen; 3180 break; 3181 } 3182 } 3183 return (mss); 3184 } 3185 3186 u_int16_t 3187 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) 3188 { 3189 #ifdef INET 3190 struct sockaddr_in *dst; 3191 struct route ro; 3192 #endif /* INET */ 3193 #ifdef INET6 3194 struct sockaddr_in6 *dst6; 3195 struct route_in6 ro6; 3196 #endif /* INET6 */ 3197 struct rtentry *rt = NULL; 3198 int hlen = 0; 3199 u_int16_t mss = tcp_mssdflt; 3200 3201 switch (af) { 3202 #ifdef INET 3203 case AF_INET: 3204 hlen = sizeof(struct ip); 3205 bzero(&ro, sizeof(ro)); 3206 dst = (struct sockaddr_in *)&ro.ro_dst; 3207 dst->sin_family = AF_INET; 3208 dst->sin_len = sizeof(*dst); 3209 dst->sin_addr = addr->v4; 3210 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING)); 3211 rt = ro.ro_rt; 3212 break; 3213 #endif /* INET */ 3214 #ifdef INET6 3215 case AF_INET6: 3216 hlen = sizeof(struct ip6_hdr); 3217 bzero(&ro6, sizeof(ro6)); 3218 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst; 3219 dst6->sin6_family = AF_INET6; 3220 dst6->sin6_len = sizeof(*dst6); 3221 dst6->sin6_addr = addr->v6; 3222 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING)); 3223 rt = ro6.ro_rt; 3224 break; 3225 #endif /* INET6 */ 3226 } 3227 3228 if (rt && rt->rt_ifp) { 3229 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr); 3230 mss = max(tcp_mssdflt, mss); 3231 RTFREE(rt); 3232 } 3233 mss = min(mss, offer); 3234 mss = max(mss, 64); /* sanity - at least max opt space */ 3235 return (mss); 3236 } 3237 3238 void 3239 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr) 3240 { 3241 struct pf_rule *r = s->rule.ptr; 3242 3243 s->rt_kif = NULL; 3244 if (!r->rt || r->rt == PF_FASTROUTE) 3245 return; 3246 switch (s->key[PF_SK_WIRE]->af) { 3247 #ifdef INET 3248 case AF_INET: 3249 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, 3250 &s->nat_src_node); 3251 s->rt_kif = r->rpool.cur->kif; 3252 break; 3253 #endif /* INET */ 3254 #ifdef INET6 3255 case AF_INET6: 3256 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, 3257 &s->nat_src_node); 3258 s->rt_kif = r->rpool.cur->kif; 3259 break; 3260 #endif /* INET6 */ 3261 } 3262 } 3263 3264 u_int32_t 3265 pf_tcp_iss(struct pf_pdesc *pd) 3266 { 3267 MD5_CTX ctx; 3268 u_int32_t digest[4]; 3269 3270 if (pf_tcp_secret_init == 0) { 3271 lwkt_gettoken(&pf_secret_token); 3272 if (pf_tcp_secret_init == 0) { 3273 karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret)); 3274 MD5Init(&pf_tcp_secret_ctx); 3275 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret, 3276 sizeof(pf_tcp_secret)); 3277 pf_tcp_secret_init = 1; 3278 } 3279 lwkt_reltoken(&pf_secret_token); 3280 } 3281 ctx = pf_tcp_secret_ctx; 3282 3283 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short)); 3284 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short)); 3285 if (pd->af == AF_INET6) { 3286 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr)); 3287 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr)); 3288 } else { 3289 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr)); 3290 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr)); 3291 } 3292 MD5Final((u_char *)digest, &ctx); 3293 pf_tcp_iss_off += 4096; 3294 3295 return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off); 3296 } 3297 3298 int 3299 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, 3300 struct pfi_kif *kif, struct mbuf *m, int off, void *h, 3301 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset 
**rsm, 3302 struct ifqueue *ifq, struct inpcb *inp) 3303 { 3304 struct pf_rule *nr = NULL; 3305 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 3306 sa_family_t af = pd->af; 3307 struct pf_rule *r, *a = NULL; 3308 struct pf_ruleset *ruleset = NULL; 3309 struct pf_src_node *nsn = NULL; 3310 struct tcphdr *th = pd->hdr.tcp; 3311 struct pf_state_key *skw = NULL, *sks = NULL; 3312 struct pf_state_key *sk = NULL, *nk = NULL; 3313 u_short reason; 3314 int rewrite = 0, hdrlen = 0; 3315 int tag = -1, rtableid = -1; 3316 int asd = 0; 3317 int match = 0; 3318 int state_icmp = 0; 3319 u_int16_t sport = 0, dport = 0; 3320 u_int16_t bproto_sum = 0, bip_sum = 0; 3321 u_int8_t icmptype = 0, icmpcode = 0; 3322 3323 3324 if (direction == PF_IN && pf_check_congestion(ifq)) { 3325 REASON_SET(&reason, PFRES_CONGEST); 3326 return (PF_DROP); 3327 } 3328 3329 if (inp != NULL) 3330 pd->lookup.done = pf_socket_lookup(direction, pd); 3331 else if (debug_pfugidhack) { 3332 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n")); 3333 pd->lookup.done = pf_socket_lookup(direction, pd); 3334 } 3335 3336 switch (pd->proto) { 3337 case IPPROTO_TCP: 3338 sport = th->th_sport; 3339 dport = th->th_dport; 3340 hdrlen = sizeof(*th); 3341 break; 3342 case IPPROTO_UDP: 3343 sport = pd->hdr.udp->uh_sport; 3344 dport = pd->hdr.udp->uh_dport; 3345 hdrlen = sizeof(*pd->hdr.udp); 3346 break; 3347 #ifdef INET 3348 case IPPROTO_ICMP: 3349 if (pd->af != AF_INET) 3350 break; 3351 sport = dport = pd->hdr.icmp->icmp_id; 3352 hdrlen = sizeof(*pd->hdr.icmp); 3353 icmptype = pd->hdr.icmp->icmp_type; 3354 icmpcode = pd->hdr.icmp->icmp_code; 3355 3356 if (icmptype == ICMP_UNREACH || 3357 icmptype == ICMP_SOURCEQUENCH || 3358 icmptype == ICMP_REDIRECT || 3359 icmptype == ICMP_TIMXCEED || 3360 icmptype == ICMP_PARAMPROB) 3361 state_icmp++; 3362 break; 3363 #endif /* INET */ 3364 #ifdef INET6 3365 case IPPROTO_ICMPV6: 3366 if (af != AF_INET6) 3367 break; 3368 sport = dport = pd->hdr.icmp6->icmp6_id; 3369 hdrlen = sizeof(*pd->hdr.icmp6); 3370 icmptype = pd->hdr.icmp6->icmp6_type; 3371 icmpcode = pd->hdr.icmp6->icmp6_code; 3372 3373 if (icmptype == ICMP6_DST_UNREACH || 3374 icmptype == ICMP6_PACKET_TOO_BIG || 3375 icmptype == ICMP6_TIME_EXCEEDED || 3376 icmptype == ICMP6_PARAM_PROB) 3377 state_icmp++; 3378 break; 3379 #endif /* INET6 */ 3380 default: 3381 sport = dport = hdrlen = 0; 3382 break; 3383 } 3384 3385 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3386 3387 /* check packet for BINAT/NAT/RDR */ 3388 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, 3389 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) { 3390 if (nk == NULL || sk == NULL) { 3391 REASON_SET(&reason, PFRES_MEMORY); 3392 goto cleanup; 3393 } 3394 3395 if (pd->ip_sum) 3396 bip_sum = *pd->ip_sum; 3397 3398 m->m_flags &= ~M_HASH; 3399 switch (pd->proto) { 3400 case IPPROTO_TCP: 3401 bproto_sum = th->th_sum; 3402 pd->proto_sum = &th->th_sum; 3403 3404 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3405 nk->port[pd->sidx] != sport) { 3406 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 3407 &th->th_sum, &nk->addr[pd->sidx], 3408 nk->port[pd->sidx], 0, af); 3409 pd->sport = &th->th_sport; 3410 sport = th->th_sport; 3411 } 3412 3413 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3414 nk->port[pd->didx] != dport) { 3415 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 3416 &th->th_sum, &nk->addr[pd->didx], 3417 nk->port[pd->didx], 0, af); 3418 dport = th->th_dport; 3419 pd->dport = &th->th_dport; 3420 } 3421 rewrite++; 3422 break; 3423 case 
IPPROTO_UDP: 3424 bproto_sum = pd->hdr.udp->uh_sum; 3425 pd->proto_sum = &pd->hdr.udp->uh_sum; 3426 3427 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3428 nk->port[pd->sidx] != sport) { 3429 pf_change_ap(saddr, &pd->hdr.udp->uh_sport, 3430 pd->ip_sum, &pd->hdr.udp->uh_sum, 3431 &nk->addr[pd->sidx], 3432 nk->port[pd->sidx], 1, af); 3433 sport = pd->hdr.udp->uh_sport; 3434 pd->sport = &pd->hdr.udp->uh_sport; 3435 } 3436 3437 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3438 nk->port[pd->didx] != dport) { 3439 pf_change_ap(daddr, &pd->hdr.udp->uh_dport, 3440 pd->ip_sum, &pd->hdr.udp->uh_sum, 3441 &nk->addr[pd->didx], 3442 nk->port[pd->didx], 1, af); 3443 dport = pd->hdr.udp->uh_dport; 3444 pd->dport = &pd->hdr.udp->uh_dport; 3445 } 3446 rewrite++; 3447 break; 3448 #ifdef INET 3449 case IPPROTO_ICMP: 3450 nk->port[0] = nk->port[1]; 3451 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET)) 3452 pf_change_a(&saddr->v4.s_addr, pd->ip_sum, 3453 nk->addr[pd->sidx].v4.s_addr, 0); 3454 3455 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET)) 3456 pf_change_a(&daddr->v4.s_addr, pd->ip_sum, 3457 nk->addr[pd->didx].v4.s_addr, 0); 3458 3459 if (nk->port[1] != pd->hdr.icmp->icmp_id) { 3460 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup( 3461 pd->hdr.icmp->icmp_cksum, sport, 3462 nk->port[1], 0); 3463 pd->hdr.icmp->icmp_id = nk->port[1]; 3464 pd->sport = &pd->hdr.icmp->icmp_id; 3465 } 3466 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 3467 break; 3468 #endif /* INET */ 3469 #ifdef INET6 3470 case IPPROTO_ICMPV6: 3471 nk->port[0] = nk->port[1]; 3472 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6)) 3473 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum, 3474 &nk->addr[pd->sidx], 0); 3475 3476 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6)) 3477 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum, 3478 &nk->addr[pd->didx], 0); 3479 rewrite++; 3480 break; 3481 #endif /* INET6 */ 3482 default: 3483 switch (af) { 3484 #ifdef INET 3485 case AF_INET: 3486 if (PF_ANEQ(saddr, 3487 &nk->addr[pd->sidx], AF_INET)) 3488 pf_change_a(&saddr->v4.s_addr, 3489 pd->ip_sum, 3490 nk->addr[pd->sidx].v4.s_addr, 0); 3491 3492 if (PF_ANEQ(daddr, 3493 &nk->addr[pd->didx], AF_INET)) 3494 pf_change_a(&daddr->v4.s_addr, 3495 pd->ip_sum, 3496 nk->addr[pd->didx].v4.s_addr, 0); 3497 break; 3498 #endif /* INET */ 3499 #ifdef INET6 3500 case AF_INET6: 3501 if (PF_ANEQ(saddr, 3502 &nk->addr[pd->sidx], AF_INET6)) 3503 PF_ACPY(saddr, &nk->addr[pd->sidx], af); 3504 3505 if (PF_ANEQ(daddr, 3506 &nk->addr[pd->didx], AF_INET6)) 3507 PF_ACPY(daddr, &nk->addr[pd->didx], af); 3508 break; 3509 #endif /* INET6 */ 3510 } 3511 break; 3512 } 3513 if (nr->natpass) 3514 r = NULL; 3515 pd->nat_rule = nr; 3516 } 3517 3518 while (r != NULL) { 3519 r->evaluations++; 3520 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3521 r = r->skip[PF_SKIP_IFP].ptr; 3522 else if (r->direction && r->direction != direction) 3523 r = r->skip[PF_SKIP_DIR].ptr; 3524 else if (r->af && r->af != af) 3525 r = r->skip[PF_SKIP_AF].ptr; 3526 else if (r->proto && r->proto != pd->proto) 3527 r = r->skip[PF_SKIP_PROTO].ptr; 3528 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, 3529 r->src.neg, kif)) 3530 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3531 /* tcp/udp only. port_op always 0 in other cases */ 3532 else if (r->src.port_op && !pf_match_port(r->src.port_op, 3533 r->src.port[0], r->src.port[1], sport)) 3534 r = r->skip[PF_SKIP_SRC_PORT].ptr; 3535 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, 3536 r->dst.neg, NULL)) 3537 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3538 /* tcp/udp only.
port_op always 0 in other cases */ 3539 else if (r->dst.port_op && !pf_match_port(r->dst.port_op, 3540 r->dst.port[0], r->dst.port[1], dport)) 3541 r = r->skip[PF_SKIP_DST_PORT].ptr; 3542 /* icmp only. type always 0 in other cases */ 3543 else if (r->type && r->type != icmptype + 1) 3544 r = TAILQ_NEXT(r, entries); 3545 /* icmp only. type always 0 in other cases */ 3546 else if (r->code && r->code != icmpcode + 1) 3547 r = TAILQ_NEXT(r, entries); 3548 else if (r->tos && !(r->tos == pd->tos)) 3549 r = TAILQ_NEXT(r, entries); 3550 else if (r->rule_flag & PFRULE_FRAGMENT) 3551 r = TAILQ_NEXT(r, entries); 3552 else if (pd->proto == IPPROTO_TCP && 3553 (r->flagset & th->th_flags) != r->flags) 3554 r = TAILQ_NEXT(r, entries); 3555 /* tcp/udp only. uid.op always 0 in other cases */ 3556 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done = 3557 pf_socket_lookup(direction, pd), 1)) && 3558 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1], 3559 pd->lookup.uid)) 3560 r = TAILQ_NEXT(r, entries); 3561 /* tcp/udp only. gid.op always 0 in other cases */ 3562 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done = 3563 pf_socket_lookup(direction, pd), 1)) && 3564 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], 3565 pd->lookup.gid)) 3566 r = TAILQ_NEXT(r, entries); 3567 else if (r->prob && 3568 r->prob <= karc4random()) 3569 r = TAILQ_NEXT(r, entries); 3570 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3571 r = TAILQ_NEXT(r, entries); 3572 else if (r->os_fingerprint != PF_OSFP_ANY && 3573 (pd->proto != IPPROTO_TCP || !pf_osfp_match( 3574 pf_osfp_fingerprint(pd, m, off, th), 3575 r->os_fingerprint))) 3576 r = TAILQ_NEXT(r, entries); 3577 else { 3578 if (r->tag) 3579 tag = r->tag; 3580 if (r->rtableid >= 0) 3581 rtableid = r->rtableid; 3582 if (r->anchor == NULL) { 3583 match = 1; 3584 *rm = r; 3585 *am = a; 3586 *rsm = ruleset; 3587 if ((*rm)->quick) 3588 break; 3589 r = TAILQ_NEXT(r, entries); 3590 } else 3591 pf_step_into_anchor(&asd, &ruleset, 3592 PF_RULESET_FILTER, &r, &a, &match); 3593 } 3594 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3595 PF_RULESET_FILTER, &r, &a, &match)) 3596 break; 3597 } 3598 r = *rm; 3599 a = *am; 3600 ruleset = *rsm; 3601 3602 REASON_SET(&reason, PFRES_MATCH); 3603 3604 if (r->log || (nr != NULL && nr->log)) { 3605 if (rewrite) 3606 m_copyback(m, off, hdrlen, pd->hdr.any); 3607 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? 
r : nr, 3608 a, ruleset, pd); 3609 } 3610 3611 if ((r->action == PF_DROP) && 3612 ((r->rule_flag & PFRULE_RETURNRST) || 3613 (r->rule_flag & PFRULE_RETURNICMP) || 3614 (r->rule_flag & PFRULE_RETURN))) { 3615 /* undo NAT changes, if they have taken place */ 3616 if (nr != NULL) { 3617 PF_ACPY(saddr, &sk->addr[pd->sidx], af); 3618 PF_ACPY(daddr, &sk->addr[pd->didx], af); 3619 if (pd->sport) 3620 *pd->sport = sk->port[pd->sidx]; 3621 if (pd->dport) 3622 *pd->dport = sk->port[pd->didx]; 3623 if (pd->proto_sum) 3624 *pd->proto_sum = bproto_sum; 3625 if (pd->ip_sum) 3626 *pd->ip_sum = bip_sum; 3627 m_copyback(m, off, hdrlen, pd->hdr.any); 3628 } 3629 if (pd->proto == IPPROTO_TCP && 3630 ((r->rule_flag & PFRULE_RETURNRST) || 3631 (r->rule_flag & PFRULE_RETURN)) && 3632 !(th->th_flags & TH_RST)) { 3633 u_int32_t ack = ntohl(th->th_seq) + pd->p_len; 3634 int len = 0; 3635 struct ip *h4; 3636 #ifdef INET6 3637 struct ip6_hdr *h6; 3638 #endif 3639 switch (af) { 3640 case AF_INET: 3641 h4 = mtod(m, struct ip *); 3642 len = h4->ip_len - off; 3643 break; 3644 #ifdef INET6 3645 case AF_INET6: 3646 h6 = mtod(m, struct ip6_hdr *); 3647 len = h6->ip6_plen - (off - sizeof(*h6)); 3648 break; 3649 #endif 3650 } 3651 3652 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af)) 3653 REASON_SET(&reason, PFRES_PROTCKSUM); 3654 else { 3655 if (th->th_flags & TH_SYN) 3656 ack++; 3657 if (th->th_flags & TH_FIN) 3658 ack++; 3659 pf_send_tcp(r, af, pd->dst, 3660 pd->src, th->th_dport, th->th_sport, 3661 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, 3662 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp); 3663 } 3664 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET && 3665 r->return_icmp) 3666 pf_send_icmp(m, r->return_icmp >> 8, 3667 r->return_icmp & 255, af, r); 3668 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && 3669 r->return_icmp6) 3670 pf_send_icmp(m, r->return_icmp6 >> 8, 3671 r->return_icmp6 & 255, af, r); 3672 } 3673 3674 if (r->action == PF_DROP) 3675 goto cleanup; 3676 3677 if (pf_tag_packet(m, tag, rtableid)) { 3678 REASON_SET(&reason, PFRES_MEMORY); 3679 goto cleanup; 3680 } 3681 3682 if (!state_icmp && (r->keep_state || nr != NULL || 3683 (pd->flags & PFDESC_TCP_NORM))) { 3684 int action; 3685 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m, 3686 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum, 3687 bip_sum, hdrlen); 3688 if (action != PF_PASS) 3689 return (action); 3690 } 3691 3692 /* copy back packet headers if we performed NAT operations */ 3693 if (rewrite) 3694 m_copyback(m, off, hdrlen, pd->hdr.any); 3695 3696 return (PF_PASS); 3697 3698 cleanup: 3699 if (sk != NULL) 3700 kfree(sk, M_PFSTATEKEYPL); 3701 if (nk != NULL) 3702 kfree(nk, M_PFSTATEKEYPL); 3703 return (PF_DROP); 3704 } 3705 3706 static __inline int 3707 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, 3708 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw, 3709 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk, 3710 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite, 3711 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum, 3712 u_int16_t bip_sum, int hdrlen) 3713 { 3714 struct pf_state *s = NULL; 3715 struct pf_src_node *sn = NULL; 3716 struct tcphdr *th = pd->hdr.tcp; 3717 u_int16_t mss = tcp_mssdflt; 3718 u_short reason; 3719 int cpu = mycpu->gd_cpuid; 3720 3721 /* check maximums */ 3722 if (r->max_states && (r->states_cur >= r->max_states)) { 3723 pf_status.lcounters[LCNT_STATES]++; 3724 
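		/* (comment added) per-rule state limit hit: bump the
		 * limit counter and refuse to create the state */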
REASON_SET(&reason, PFRES_MAXSTATES); 3725 return (PF_DROP); 3726 } 3727 /* src node for filter rule */ 3728 if ((r->rule_flag & PFRULE_SRCTRACK || 3729 r->rpool.opts & PF_POOL_STICKYADDR) && 3730 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) { 3731 REASON_SET(&reason, PFRES_SRCLIMIT); 3732 goto csfailed; 3733 } 3734 /* src node for translation rule */ 3735 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) && 3736 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) { 3737 REASON_SET(&reason, PFRES_SRCLIMIT); 3738 goto csfailed; 3739 } 3740 s = kmalloc(sizeof(struct pf_state), M_PFSTATEPL, M_NOWAIT|M_ZERO); 3741 if (s == NULL) { 3742 REASON_SET(&reason, PFRES_MEMORY); 3743 goto csfailed; 3744 } 3745 s->id = 0; /* XXX Do we really need that? not in OpenBSD */ 3746 s->creatorid = 0; 3747 s->rule.ptr = r; 3748 s->nat_rule.ptr = nr; 3749 s->anchor.ptr = a; 3750 STATE_INC_COUNTERS(s); 3751 if (r->allow_opts) 3752 s->state_flags |= PFSTATE_ALLOWOPTS; 3753 if (r->rule_flag & PFRULE_STATESLOPPY) 3754 s->state_flags |= PFSTATE_SLOPPY; 3755 s->log = r->log & PF_LOG_ALL; 3756 if (nr != NULL) 3757 s->log |= nr->log & PF_LOG_ALL; 3758 switch (pd->proto) { 3759 case IPPROTO_TCP: 3760 s->src.seqlo = ntohl(th->th_seq); 3761 s->src.seqhi = s->src.seqlo + pd->p_len + 1; 3762 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && 3763 r->keep_state == PF_STATE_MODULATE) { 3764 /* Generate sequence number modulator */ 3765 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 3766 0) 3767 s->src.seqdiff = 1; 3768 pf_change_a(&th->th_seq, &th->th_sum, 3769 htonl(s->src.seqlo + s->src.seqdiff), 0); 3770 *rewrite = 1; 3771 } else 3772 s->src.seqdiff = 0; 3773 if (th->th_flags & TH_SYN) { 3774 s->src.seqhi++; 3775 s->src.wscale = pf_get_wscale(m, off, 3776 th->th_off, pd->af); 3777 } 3778 s->src.max_win = MAX(ntohs(th->th_win), 1); 3779 if (s->src.wscale & PF_WSCALE_MASK) { 3780 /* Remove scale factor from initial window */ 3781 int win = s->src.max_win; 3782 win += 1 << (s->src.wscale & PF_WSCALE_MASK); 3783 s->src.max_win = (win - 1) >> 3784 (s->src.wscale & PF_WSCALE_MASK); 3785 } 3786 if (th->th_flags & TH_FIN) 3787 s->src.seqhi++; 3788 s->dst.seqhi = 1; 3789 s->dst.max_win = 1; 3790 s->src.state = TCPS_SYN_SENT; 3791 s->dst.state = TCPS_CLOSED; 3792 s->timeout = PFTM_TCP_FIRST_PACKET; 3793 break; 3794 case IPPROTO_UDP: 3795 s->src.state = PFUDPS_SINGLE; 3796 s->dst.state = PFUDPS_NO_TRAFFIC; 3797 s->timeout = PFTM_UDP_FIRST_PACKET; 3798 break; 3799 case IPPROTO_ICMP: 3800 #ifdef INET6 3801 case IPPROTO_ICMPV6: 3802 #endif 3803 s->timeout = PFTM_ICMP_FIRST_PACKET; 3804 break; 3805 default: 3806 s->src.state = PFOTHERS_SINGLE; 3807 s->dst.state = PFOTHERS_NO_TRAFFIC; 3808 s->timeout = PFTM_OTHER_FIRST_PACKET; 3809 } 3810 3811 s->creation = time_second; 3812 s->expire = time_second; 3813 3814 if (sn != NULL) { 3815 s->src_node = sn; 3816 s->src_node->states++; 3817 } 3818 if (nsn != NULL) { 3819 /* XXX We only modify one side for now. 
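		 * (Note added) As in pf_get_translation(), index 1 of the
		 * state key holds the translated side, so the source
		 * node's raddr is taken from nk->addr[1] just below.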
*/ 3820 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af); 3821 s->nat_src_node = nsn; 3822 s->nat_src_node->states++; 3823 } 3824 if (pd->proto == IPPROTO_TCP) { 3825 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m, 3826 off, pd, th, &s->src, &s->dst)) { 3827 REASON_SET(&reason, PFRES_MEMORY); 3828 pf_src_tree_remove_state(s); 3829 STATE_DEC_COUNTERS(s); 3830 kfree(s, M_PFSTATEPL); 3831 return (PF_DROP); 3832 } 3833 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && 3834 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s, 3835 &s->src, &s->dst, rewrite)) { 3836 /* This really shouldn't happen!!! */ 3837 DPFPRINTF(PF_DEBUG_URGENT, 3838 ("pf_normalize_tcp_stateful failed on first pkt")); 3839 pf_normalize_tcp_cleanup(s); 3840 pf_src_tree_remove_state(s); 3841 STATE_DEC_COUNTERS(s); 3842 kfree(s, M_PFSTATEPL); 3843 return (PF_DROP); 3844 } 3845 } 3846 s->direction = pd->dir; 3847 3848 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk, 3849 pd->src, pd->dst, sport, dport)) 3850 goto csfailed; 3851 3852 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) { 3853 if (pd->proto == IPPROTO_TCP) 3854 pf_normalize_tcp_cleanup(s); 3855 REASON_SET(&reason, PFRES_STATEINS); 3856 pf_src_tree_remove_state(s); 3857 STATE_DEC_COUNTERS(s); 3858 kfree(s, M_PFSTATEPL); 3859 return (PF_DROP); 3860 } else 3861 *sm = s; 3862 3863 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */ 3864 if (tag > 0) { 3865 pf_tag_ref(tag); 3866 s->tag = tag; 3867 } 3868 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == 3869 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { 3870 s->src.state = PF_TCPS_PROXY_SRC; 3871 /* undo NAT changes, if they have taken place */ 3872 if (nr != NULL) { 3873 struct pf_state_key *skt = s->key[PF_SK_WIRE]; 3874 if (pd->dir == PF_OUT) 3875 skt = s->key[PF_SK_STACK]; 3876 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); 3877 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); 3878 if (pd->sport) 3879 *pd->sport = skt->port[pd->sidx]; 3880 if (pd->dport) 3881 *pd->dport = skt->port[pd->didx]; 3882 if (pd->proto_sum) 3883 *pd->proto_sum = bproto_sum; 3884 if (pd->ip_sum) 3885 *pd->ip_sum = bip_sum; 3886 m_copyback(m, off, hdrlen, pd->hdr.any); 3887 } 3888 s->src.seqhi = htonl(karc4random()); 3889 /* Find mss option */ 3890 mss = pf_get_mss(m, off, th->th_off, pd->af); 3891 mss = pf_calc_mss(pd->src, pd->af, mss); 3892 mss = pf_calc_mss(pd->dst, pd->af, mss); 3893 s->src.mss = mss; 3894 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, 3895 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, 3896 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL); 3897 REASON_SET(&reason, PFRES_SYNPROXY); 3898 return (PF_SYNPROXY_DROP); 3899 } 3900 3901 return (PF_PASS); 3902 3903 csfailed: 3904 if (sk != NULL) 3905 kfree(sk, M_PFSTATEKEYPL); 3906 if (nk != NULL) 3907 kfree(nk, M_PFSTATEKEYPL); 3908 3909 if (sn != NULL && sn->states == 0 && sn->expire == 0) { 3910 RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], sn); 3911 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 3912 atomic_add_int(&pf_status.src_nodes, -1); 3913 kfree(sn, M_PFSRCTREEPL); 3914 } 3915 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) { 3916 RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], nsn); 3917 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 3918 atomic_add_int(&pf_status.src_nodes, -1); 3919 kfree(nsn, M_PFSRCTREEPL); 3920 } 3921 return (PF_DROP); 3922 } 3923 3924 int 3925 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, 3926 struct mbuf *m, void *h, 
struct pf_pdesc *pd, struct pf_rule **am, 3927 struct pf_ruleset **rsm) 3928 { 3929 struct pf_rule *r, *a = NULL; 3930 struct pf_ruleset *ruleset = NULL; 3931 sa_family_t af = pd->af; 3932 u_short reason; 3933 int tag = -1; 3934 int asd = 0; 3935 int match = 0; 3936 3937 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3938 while (r != NULL) { 3939 r->evaluations++; 3940 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3941 r = r->skip[PF_SKIP_IFP].ptr; 3942 else if (r->direction && r->direction != direction) 3943 r = r->skip[PF_SKIP_DIR].ptr; 3944 else if (r->af && r->af != af) 3945 r = r->skip[PF_SKIP_AF].ptr; 3946 else if (r->proto && r->proto != pd->proto) 3947 r = r->skip[PF_SKIP_PROTO].ptr; 3948 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 3949 r->src.neg, kif)) 3950 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3951 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 3952 r->dst.neg, NULL)) 3953 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3954 else if (r->tos && !(r->tos == pd->tos)) 3955 r = TAILQ_NEXT(r, entries); 3956 else if (r->os_fingerprint != PF_OSFP_ANY) 3957 r = TAILQ_NEXT(r, entries); 3958 else if (pd->proto == IPPROTO_UDP && 3959 (r->src.port_op || r->dst.port_op)) 3960 r = TAILQ_NEXT(r, entries); 3961 else if (pd->proto == IPPROTO_TCP && 3962 (r->src.port_op || r->dst.port_op || r->flagset)) 3963 r = TAILQ_NEXT(r, entries); 3964 else if ((pd->proto == IPPROTO_ICMP || 3965 pd->proto == IPPROTO_ICMPV6) && 3966 (r->type || r->code)) 3967 r = TAILQ_NEXT(r, entries); 3968 else if (r->prob && r->prob <= karc4random()) 3969 r = TAILQ_NEXT(r, entries); 3970 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3971 r = TAILQ_NEXT(r, entries); 3972 else { 3973 if (r->anchor == NULL) { 3974 match = 1; 3975 *rm = r; 3976 *am = a; 3977 *rsm = ruleset; 3978 if ((*rm)->quick) 3979 break; 3980 r = TAILQ_NEXT(r, entries); 3981 } else 3982 pf_step_into_anchor(&asd, &ruleset, 3983 PF_RULESET_FILTER, &r, &a, &match); 3984 } 3985 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3986 PF_RULESET_FILTER, &r, &a, &match)) 3987 break; 3988 } 3989 r = *rm; 3990 a = *am; 3991 ruleset = *rsm; 3992 3993 REASON_SET(&reason, PFRES_MATCH); 3994 3995 if (r->log) 3996 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset, 3997 pd); 3998 3999 if (r->action != PF_PASS) 4000 return (PF_DROP); 4001 4002 if (pf_tag_packet(m, tag, -1)) { 4003 REASON_SET(&reason, PFRES_MEMORY); 4004 return (PF_DROP); 4005 } 4006 4007 return (PF_PASS); 4008 } 4009 4010 int 4011 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst, 4012 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off, 4013 struct pf_pdesc *pd, u_short *reason, int *copyback) 4014 { 4015 struct tcphdr *th = pd->hdr.tcp; 4016 u_int16_t win = ntohs(th->th_win); 4017 u_int32_t ack, end, seq, orig_seq; 4018 u_int8_t sws, dws; 4019 int ackskew; 4020 4021 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { 4022 sws = src->wscale & PF_WSCALE_MASK; 4023 dws = dst->wscale & PF_WSCALE_MASK; 4024 } else 4025 sws = dws = 0; 4026 4027 /* 4028 * Sequence tracking algorithm from Guido van Rooij's paper: 4029 * http://www.madison-gurkha.com/publications/tcp_filtering/ 4030 * tcp_filtering.ps 4031 */ 4032 4033 orig_seq = seq = ntohl(th->th_seq); 4034 if (src->seqlo == 0) { 4035 /* First packet from this end. 
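(Reached when src->seqlo == 0, i.e. no traffic seen from this peer yet; note added.)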
Set its state */ 4036 4037 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) && 4038 src->scrub == NULL) { 4039 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) { 4040 REASON_SET(reason, PFRES_MEMORY); 4041 return (PF_DROP); 4042 } 4043 } 4044 4045 /* Deferred generation of sequence number modulator */ 4046 if (dst->seqdiff && !src->seqdiff) { 4047 /* use random iss for the TCP server */ 4048 while ((src->seqdiff = karc4random() - seq) == 0) 4049 ; 4050 ack = ntohl(th->th_ack) - dst->seqdiff; 4051 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 4052 src->seqdiff), 0); 4053 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 4054 *copyback = 1; 4055 } else { 4056 ack = ntohl(th->th_ack); 4057 } 4058 4059 end = seq + pd->p_len; 4060 if (th->th_flags & TH_SYN) { 4061 end++; 4062 (*state)->sync_flags |= PFSTATE_GOT_SYN2; 4063 if (dst->wscale & PF_WSCALE_FLAG) { 4064 src->wscale = pf_get_wscale(m, off, th->th_off, 4065 pd->af); 4066 if (src->wscale & PF_WSCALE_FLAG) { 4067 /* Remove scale factor from initial 4068 * window */ 4069 sws = src->wscale & PF_WSCALE_MASK; 4070 win = ((u_int32_t)win + (1 << sws) - 1) 4071 >> sws; 4072 dws = dst->wscale & PF_WSCALE_MASK; 4073 } else { 4074 /* fixup other window */ 4075 dst->max_win <<= dst->wscale & 4076 PF_WSCALE_MASK; 4077 /* in case of a retrans SYN|ACK */ 4078 dst->wscale = 0; 4079 } 4080 } 4081 } 4082 if (th->th_flags & TH_FIN) 4083 end++; 4084 4085 src->seqlo = seq; 4086 if (src->state < TCPS_SYN_SENT) 4087 src->state = TCPS_SYN_SENT; 4088 4089 /* 4090 * May need to slide the window (seqhi may have been set by 4091 * the crappy stack check or if we picked up the connection 4092 * after establishment) 4093 */ 4094 if (src->seqhi == 1 || 4095 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)) 4096 src->seqhi = end + MAX(1, dst->max_win << dws); 4097 if (win > src->max_win) 4098 src->max_win = win; 4099 4100 } else { 4101 ack = ntohl(th->th_ack) - dst->seqdiff; 4102 if (src->seqdiff) { 4103 /* Modulate sequence numbers */ 4104 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 4105 src->seqdiff), 0); 4106 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 4107 *copyback = 1; 4108 } 4109 end = seq + pd->p_len; 4110 if (th->th_flags & TH_SYN) 4111 end++; 4112 if (th->th_flags & TH_FIN) 4113 end++; 4114 } 4115 4116 if ((th->th_flags & TH_ACK) == 0) { 4117 /* Let it pass through the ack skew check */ 4118 ack = dst->seqlo; 4119 } else if ((ack == 0 && 4120 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || 4121 /* broken tcp stacks do not set ack */ 4122 (dst->state < TCPS_SYN_SENT)) { 4123 /* 4124 * Many stacks (ours included) will set the ACK number in a 4125 * FIN|ACK if the SYN times out -- no sequence to ACK. 4126 */ 4127 ack = dst->seqlo; 4128 } 4129 4130 if (seq == end) { 4131 /* Ease sequencing restrictions on no data packets */ 4132 seq = src->seqlo; 4133 end = seq; 4134 } 4135 4136 ackskew = dst->seqlo - ack; 4137 4138 4139 /* 4140 * Need to demodulate the sequence numbers in any TCP SACK options 4141 * (Selective ACK). We could optionally validate the SACK values 4142 * against the current ACK window, either forwards or backwards, but 4143 * I'm not confident that SACK has been implemented properly 4144 * everywhere. It wouldn't surprise me if several stacks accidentally 4145 * SACK too far backwards of previously ACKed data. There really aren't 4146 * any security implications of bad SACKing unless the target stack 4147 * doesn't validate the option length correctly.
Someone trying to 4148 * spoof into a TCP connection won't bother blindly sending SACK 4149 * options anyway. 4150 */ 4151 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { 4152 if (pf_modulate_sack(m, off, pd, th, dst)) 4153 *copyback = 1; 4154 } 4155 4156 4157 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 4158 if (SEQ_GEQ(src->seqhi, end) && 4159 /* Last octet inside other's window space */ 4160 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && 4161 /* Retrans: not more than one window back */ 4162 (ackskew >= -MAXACKWINDOW) && 4163 /* Acking not more than one reassembled fragment backwards */ 4164 (ackskew <= (MAXACKWINDOW << sws)) && 4165 /* Acking not more than one window forward */ 4166 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || 4167 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || 4168 (pd->flags & PFDESC_IP_REAS) == 0)) { 4169 /* Require an exact/+1 sequence match on resets when possible */ 4170 4171 if (dst->scrub || src->scrub) { 4172 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4173 *state, src, dst, copyback)) 4174 return (PF_DROP); 4175 } 4176 4177 /* update max window */ 4178 if (src->max_win < win) 4179 src->max_win = win; 4180 /* synchronize sequencing */ 4181 if (SEQ_GT(end, src->seqlo)) 4182 src->seqlo = end; 4183 /* slide the window of what the other end can send */ 4184 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4185 dst->seqhi = ack + MAX((win << sws), 1); 4186 4187 4188 /* update states */ 4189 if (th->th_flags & TH_SYN) 4190 if (src->state < TCPS_SYN_SENT) 4191 src->state = TCPS_SYN_SENT; 4192 if (th->th_flags & TH_FIN) 4193 if (src->state < TCPS_CLOSING) 4194 src->state = TCPS_CLOSING; 4195 if (th->th_flags & TH_ACK) { 4196 if (dst->state == TCPS_SYN_SENT) { 4197 dst->state = TCPS_ESTABLISHED; 4198 if (src->state == TCPS_ESTABLISHED && 4199 (*state)->src_node != NULL && 4200 pf_src_connlimit(state)) { 4201 REASON_SET(reason, PFRES_SRCLIMIT); 4202 return (PF_DROP); 4203 } 4204 } else if (dst->state == TCPS_CLOSING) 4205 dst->state = TCPS_FIN_WAIT_2; 4206 } 4207 if (th->th_flags & TH_RST) 4208 src->state = dst->state = TCPS_TIME_WAIT; 4209 4210 /* update expire time */ 4211 (*state)->expire = time_second; 4212 if (src->state >= TCPS_FIN_WAIT_2 && 4213 dst->state >= TCPS_FIN_WAIT_2) 4214 (*state)->timeout = PFTM_TCP_CLOSED; 4215 else if (src->state >= TCPS_CLOSING && 4216 dst->state >= TCPS_CLOSING) 4217 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4218 else if (src->state < TCPS_ESTABLISHED || 4219 dst->state < TCPS_ESTABLISHED) 4220 (*state)->timeout = PFTM_TCP_OPENING; 4221 else if (src->state >= TCPS_CLOSING || 4222 dst->state >= TCPS_CLOSING) 4223 (*state)->timeout = PFTM_TCP_CLOSING; 4224 else 4225 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4226 4227 /* Fall through to PASS packet */ 4228 4229 } else if ((dst->state < TCPS_SYN_SENT || 4230 dst->state >= TCPS_FIN_WAIT_2 || 4231 src->state >= TCPS_FIN_WAIT_2) && 4232 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 4233 /* Within a window forward of the originating packet */ 4234 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 4235 /* Within a window backward of the originating packet */ 4236 4237 /* 4238 * This currently handles three situations: 4239 * 1) Stupid stacks will shotgun SYNs before their peer 4240 * replies. 4241 * 2) When PF catches an already established stream (the 4242 * firewall rebooted, the state table was flushed, routes 4243 * changed...) 
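* (in which case the original handshake, and with it any window-scale negotiation, was never observed)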
4244 * 3) Packets get funky immediately after the connection
4245 * closes (this should catch Solaris spurious ACK|FINs
4246 * that web servers like to spew after a close)
4247 *
4248 * This must be a little more careful than the above code
4249 * since packet floods will also be caught here. We don't
4250 * update the TTL here to mitigate the damage of a packet
4251 * flood and so the same code can handle awkward establishment
4252 * and a loosened connection close.
4253 * In the establishment case, a correct peer response will
4254 * validate the connection, go through the normal state code
4255 * and keep updating the state TTL.
4256 */
4257
4258 if (pf_status.debug >= PF_DEBUG_MISC) {
4259 kprintf("pf: loose state match: ");
4260 pf_print_state(*state);
4261 pf_print_flags(th->th_flags);
4262 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4263 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len,
4264 ackskew, (unsigned long long)(*state)->packets[0],
4265 (unsigned long long)(*state)->packets[1],
4266 pd->dir == PF_IN ? "in" : "out",
4267 pd->dir == (*state)->direction ? "fwd" : "rev");
4268 }
4269
4270 if (dst->scrub || src->scrub) {
4271 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4272 *state, src, dst, copyback))
4273 return (PF_DROP);
4274 }
4275
4276 /* update max window */
4277 if (src->max_win < win)
4278 src->max_win = win;
4279 /* synchronize sequencing */
4280 if (SEQ_GT(end, src->seqlo))
4281 src->seqlo = end;
4282 /* slide the window of what the other end can send */
4283 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4284 dst->seqhi = ack + MAX((win << sws), 1);
4285
4286 /*
4287 * Cannot set dst->seqhi here since this could be a shotgunned
4288 * SYN and not an already established connection.
4289 */
4290
4291 if (th->th_flags & TH_FIN)
4292 if (src->state < TCPS_CLOSING)
4293 src->state = TCPS_CLOSING;
4294 if (th->th_flags & TH_RST)
4295 src->state = dst->state = TCPS_TIME_WAIT;
4296
4297 /* Fall through to PASS packet */
4298
4299 } else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY ||
4300 ((*state)->pickup_mode == PF_PICKUPS_ENABLED &&
4301 ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) !=
4302 PFSTATE_GOT_SYN_MASK)) {
4303 /*
4304 * If pickup mode is hash only, do not fail on sequence checks.
4305 *
4306 * If pickup mode is enabled and we did not see the SYN in
4307 * both directions, do not fail on sequence checks because
4308 * we do not have complete information on window scale.
4309 *
4310 * Adjust expiration and fall through to PASS packet.
4311 * XXX Add a FIN check to reduce timeout?
4312 */
4313 (*state)->expire = time_second;
4314 } else {
4315 /*
4316 * Failure processing
4317 */
4318 if ((*state)->dst.state == TCPS_SYN_SENT &&
4319 (*state)->src.state == TCPS_SYN_SENT) {
4320 /* Send RST for state mismatches during handshake */
4321 if (!(th->th_flags & TH_RST))
4322 pf_send_tcp((*state)->rule.ptr, pd->af,
4323 pd->dst, pd->src, th->th_dport,
4324 th->th_sport, ntohl(th->th_ack), 0,
4325 TH_RST, 0, 0,
4326 (*state)->rule.ptr->return_ttl, 1, 0,
4327 pd->eh, kif->pfik_ifp);
4328 src->seqlo = 0;
4329 src->seqhi = 1;
4330 src->max_win = 1;
4331 } else if (pf_status.debug >= PF_DEBUG_MISC) {
4332 kprintf("pf: BAD state: ");
4333 pf_print_state(*state);
4334 pf_print_flags(th->th_flags);
4335 kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4336 "pkts=%llu:%llu dir=%s,%s\n",
4337 seq, orig_seq, ack, pd->p_len, ackskew,
4338 (unsigned long long)(*state)->packets[0],
4339 (unsigned long long)(*state)->packets[1],
4340 pd->dir == PF_IN ?
"in" : "out", 4341 pd->dir == (*state)->direction ? "fwd" : "rev"); 4342 kprintf("pf: State failure on: %c %c %c %c | %c %c\n", 4343 SEQ_GEQ(src->seqhi, end) ? ' ' : '1', 4344 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? 4345 ' ': '2', 4346 (ackskew >= -MAXACKWINDOW) ? ' ' : '3', 4347 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', 4348 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', 4349 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6'); 4350 } 4351 REASON_SET(reason, PFRES_BADSTATE); 4352 return (PF_DROP); 4353 } 4354 4355 return (PF_PASS); 4356 } 4357 4358 int 4359 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst, 4360 struct pf_state **state, struct pf_pdesc *pd, u_short *reason) 4361 { 4362 struct tcphdr *th = pd->hdr.tcp; 4363 4364 if (th->th_flags & TH_SYN) 4365 if (src->state < TCPS_SYN_SENT) 4366 src->state = TCPS_SYN_SENT; 4367 if (th->th_flags & TH_FIN) 4368 if (src->state < TCPS_CLOSING) 4369 src->state = TCPS_CLOSING; 4370 if (th->th_flags & TH_ACK) { 4371 if (dst->state == TCPS_SYN_SENT) { 4372 dst->state = TCPS_ESTABLISHED; 4373 if (src->state == TCPS_ESTABLISHED && 4374 (*state)->src_node != NULL && 4375 pf_src_connlimit(state)) { 4376 REASON_SET(reason, PFRES_SRCLIMIT); 4377 return (PF_DROP); 4378 } 4379 } else if (dst->state == TCPS_CLOSING) { 4380 dst->state = TCPS_FIN_WAIT_2; 4381 } else if (src->state == TCPS_SYN_SENT && 4382 dst->state < TCPS_SYN_SENT) { 4383 /* 4384 * Handle a special sloppy case where we only see one 4385 * half of the connection. If there is a ACK after 4386 * the initial SYN without ever seeing a packet from 4387 * the destination, set the connection to established. 4388 */ 4389 dst->state = src->state = TCPS_ESTABLISHED; 4390 if ((*state)->src_node != NULL && 4391 pf_src_connlimit(state)) { 4392 REASON_SET(reason, PFRES_SRCLIMIT); 4393 return (PF_DROP); 4394 } 4395 } else if (src->state == TCPS_CLOSING && 4396 dst->state == TCPS_ESTABLISHED && 4397 dst->seqlo == 0) { 4398 /* 4399 * Handle the closing of half connections where we 4400 * don't see the full bidirectional FIN/ACK+ACK 4401 * handshake. 
4402 */ 4403 dst->state = TCPS_CLOSING; 4404 } 4405 } 4406 if (th->th_flags & TH_RST) 4407 src->state = dst->state = TCPS_TIME_WAIT; 4408 4409 /* update expire time */ 4410 (*state)->expire = time_second; 4411 if (src->state >= TCPS_FIN_WAIT_2 && 4412 dst->state >= TCPS_FIN_WAIT_2) 4413 (*state)->timeout = PFTM_TCP_CLOSED; 4414 else if (src->state >= TCPS_CLOSING && 4415 dst->state >= TCPS_CLOSING) 4416 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4417 else if (src->state < TCPS_ESTABLISHED || 4418 dst->state < TCPS_ESTABLISHED) 4419 (*state)->timeout = PFTM_TCP_OPENING; 4420 else if (src->state >= TCPS_CLOSING || 4421 dst->state >= TCPS_CLOSING) 4422 (*state)->timeout = PFTM_TCP_CLOSING; 4423 else 4424 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4425 4426 return (PF_PASS); 4427 } 4428 4429 int 4430 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, 4431 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4432 u_short *reason) 4433 { 4434 struct pf_state_key_cmp key; 4435 struct tcphdr *th = pd->hdr.tcp; 4436 int copyback = 0; 4437 struct pf_state_peer *src, *dst; 4438 struct pf_state_key *sk; 4439 4440 key.af = pd->af; 4441 key.proto = IPPROTO_TCP; 4442 if (direction == PF_IN) { /* wire side, straight */ 4443 PF_ACPY(&key.addr[0], pd->src, key.af); 4444 PF_ACPY(&key.addr[1], pd->dst, key.af); 4445 key.port[0] = th->th_sport; 4446 key.port[1] = th->th_dport; 4447 } else { /* stack side, reverse */ 4448 PF_ACPY(&key.addr[1], pd->src, key.af); 4449 PF_ACPY(&key.addr[0], pd->dst, key.af); 4450 key.port[1] = th->th_sport; 4451 key.port[0] = th->th_dport; 4452 } 4453 4454 STATE_LOOKUP(kif, &key, direction, *state, m); 4455 4456 if (direction == (*state)->direction) { 4457 src = &(*state)->src; 4458 dst = &(*state)->dst; 4459 } else { 4460 src = &(*state)->dst; 4461 dst = &(*state)->src; 4462 } 4463 4464 sk = (*state)->key[pd->didx]; 4465 4466 if ((*state)->src.state == PF_TCPS_PROXY_SRC) { 4467 if (direction != (*state)->direction) { 4468 REASON_SET(reason, PFRES_SYNPROXY); 4469 return (PF_SYNPROXY_DROP); 4470 } 4471 if (th->th_flags & TH_SYN) { 4472 if (ntohl(th->th_seq) != (*state)->src.seqlo) { 4473 REASON_SET(reason, PFRES_SYNPROXY); 4474 return (PF_DROP); 4475 } 4476 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4477 pd->src, th->th_dport, th->th_sport, 4478 (*state)->src.seqhi, ntohl(th->th_seq) + 1, 4479 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 4480 0, NULL, NULL); 4481 REASON_SET(reason, PFRES_SYNPROXY); 4482 return (PF_SYNPROXY_DROP); 4483 } else if (!(th->th_flags & TH_ACK) || 4484 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4485 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4486 REASON_SET(reason, PFRES_SYNPROXY); 4487 return (PF_DROP); 4488 } else if ((*state)->src_node != NULL && 4489 pf_src_connlimit(state)) { 4490 REASON_SET(reason, PFRES_SRCLIMIT); 4491 return (PF_DROP); 4492 } else 4493 (*state)->src.state = PF_TCPS_PROXY_DST; 4494 } 4495 if ((*state)->src.state == PF_TCPS_PROXY_DST) { 4496 if (direction == (*state)->direction) { 4497 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || 4498 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4499 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4500 REASON_SET(reason, PFRES_SYNPROXY); 4501 return (PF_DROP); 4502 } 4503 (*state)->src.max_win = MAX(ntohs(th->th_win), 1); 4504 if ((*state)->dst.seqhi == 1) 4505 (*state)->dst.seqhi = htonl(karc4random()); 4506 pf_send_tcp((*state)->rule.ptr, pd->af, 4507 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4508 sk->port[pd->sidx], sk->port[pd->didx], 4509 
(*state)->dst.seqhi, 0, TH_SYN, 0, 4510 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL); 4511 REASON_SET(reason, PFRES_SYNPROXY); 4512 return (PF_SYNPROXY_DROP); 4513 } else if (((th->th_flags & (TH_SYN|TH_ACK)) != 4514 (TH_SYN|TH_ACK)) || 4515 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { 4516 REASON_SET(reason, PFRES_SYNPROXY); 4517 return (PF_DROP); 4518 } else { 4519 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); 4520 (*state)->dst.seqlo = ntohl(th->th_seq); 4521 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4522 pd->src, th->th_dport, th->th_sport, 4523 ntohl(th->th_ack), ntohl(th->th_seq) + 1, 4524 TH_ACK, (*state)->src.max_win, 0, 0, 0, 4525 (*state)->tag, NULL, NULL); 4526 pf_send_tcp((*state)->rule.ptr, pd->af, 4527 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4528 sk->port[pd->sidx], sk->port[pd->didx], 4529 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, 4530 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 4531 0, NULL, NULL); 4532 (*state)->src.seqdiff = (*state)->dst.seqhi - 4533 (*state)->src.seqlo; 4534 (*state)->dst.seqdiff = (*state)->src.seqhi - 4535 (*state)->dst.seqlo; 4536 (*state)->src.seqhi = (*state)->src.seqlo + 4537 (*state)->dst.max_win; 4538 (*state)->dst.seqhi = (*state)->dst.seqlo + 4539 (*state)->src.max_win; 4540 (*state)->src.wscale = (*state)->dst.wscale = 0; 4541 (*state)->src.state = (*state)->dst.state = 4542 TCPS_ESTABLISHED; 4543 REASON_SET(reason, PFRES_SYNPROXY); 4544 return (PF_SYNPROXY_DROP); 4545 } 4546 } 4547 4548 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) && 4549 dst->state >= TCPS_FIN_WAIT_2 && 4550 src->state >= TCPS_FIN_WAIT_2) { 4551 if (pf_status.debug >= PF_DEBUG_MISC) { 4552 kprintf("pf: state reuse "); 4553 pf_print_state(*state); 4554 pf_print_flags(th->th_flags); 4555 kprintf("\n"); 4556 } 4557 /* XXX make sure it's the same direction ?? */ 4558 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 4559 pf_unlink_state(*state); 4560 *state = NULL; 4561 return (PF_DROP); 4562 } 4563 4564 if ((*state)->state_flags & PFSTATE_SLOPPY) { 4565 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP) 4566 return (PF_DROP); 4567 } else { 4568 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason, 4569 ©back) == PF_DROP) 4570 return (PF_DROP); 4571 } 4572 4573 /* translate source/destination address, if necessary */ 4574 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4575 struct pf_state_key *nk = (*state)->key[pd->didx]; 4576 4577 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4578 nk->port[pd->sidx] != th->th_sport) { 4579 /* 4580 * The translated source address may be completely 4581 * unrelated to the saved link header, make sure 4582 * a bridge doesn't try to use it. 4583 */ 4584 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4585 m->m_flags &= ~M_HASH; 4586 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 4587 &th->th_sum, &nk->addr[pd->sidx], 4588 nk->port[pd->sidx], 0, pd->af); 4589 } 4590 4591 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4592 nk->port[pd->didx] != th->th_dport) { 4593 /* 4594 * If we don't redispatch the packet will go into 4595 * the protocol stack on the wrong cpu for the 4596 * post-translated address. 
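* Clearing M_HASH below forces the stack to recompute the flow hash from the rewritten tuple and redispatch accordingly.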
4597 */ 4598 m->m_flags &= ~M_HASH; 4599 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 4600 &th->th_sum, &nk->addr[pd->didx], 4601 nk->port[pd->didx], 0, pd->af); 4602 } 4603 copyback = 1; 4604 } 4605 4606 /* Copyback sequence modulation or stateful scrub changes if needed */ 4607 if (copyback) 4608 m_copyback(m, off, sizeof(*th), (caddr_t)th); 4609 4610 return (PF_PASS); 4611 } 4612 4613 int 4614 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, 4615 struct mbuf *m, int off, void *h, struct pf_pdesc *pd) 4616 { 4617 struct pf_state_peer *src, *dst; 4618 struct pf_state_key_cmp key; 4619 struct udphdr *uh = pd->hdr.udp; 4620 4621 key.af = pd->af; 4622 key.proto = IPPROTO_UDP; 4623 if (direction == PF_IN) { /* wire side, straight */ 4624 PF_ACPY(&key.addr[0], pd->src, key.af); 4625 PF_ACPY(&key.addr[1], pd->dst, key.af); 4626 key.port[0] = uh->uh_sport; 4627 key.port[1] = uh->uh_dport; 4628 } else { /* stack side, reverse */ 4629 PF_ACPY(&key.addr[1], pd->src, key.af); 4630 PF_ACPY(&key.addr[0], pd->dst, key.af); 4631 key.port[1] = uh->uh_sport; 4632 key.port[0] = uh->uh_dport; 4633 } 4634 4635 STATE_LOOKUP(kif, &key, direction, *state, m); 4636 4637 if (direction == (*state)->direction) { 4638 src = &(*state)->src; 4639 dst = &(*state)->dst; 4640 } else { 4641 src = &(*state)->dst; 4642 dst = &(*state)->src; 4643 } 4644 4645 /* update states */ 4646 if (src->state < PFUDPS_SINGLE) 4647 src->state = PFUDPS_SINGLE; 4648 if (dst->state == PFUDPS_SINGLE) 4649 dst->state = PFUDPS_MULTIPLE; 4650 4651 /* update expire time */ 4652 (*state)->expire = time_second; 4653 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) 4654 (*state)->timeout = PFTM_UDP_MULTIPLE; 4655 else 4656 (*state)->timeout = PFTM_UDP_SINGLE; 4657 4658 /* translate source/destination address, if necessary */ 4659 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4660 struct pf_state_key *nk = (*state)->key[pd->didx]; 4661 4662 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4663 nk->port[pd->sidx] != uh->uh_sport) { 4664 /* 4665 * The translated source address may be completely 4666 * unrelated to the saved link header, make sure 4667 * a bridge doesn't try to use it. 4668 */ 4669 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4670 m->m_flags &= ~M_HASH; 4671 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 4672 &uh->uh_sum, &nk->addr[pd->sidx], 4673 nk->port[pd->sidx], 1, pd->af); 4674 } 4675 4676 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4677 nk->port[pd->didx] != uh->uh_dport) { 4678 /* 4679 * If we don't redispatch the packet will go into 4680 * the protocol stack on the wrong cpu for the 4681 * post-translated address. 
4682 */ 4683 m->m_flags &= ~M_HASH; 4684 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 4685 &uh->uh_sum, &nk->addr[pd->didx], 4686 nk->port[pd->didx], 1, pd->af); 4687 } 4688 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 4689 } 4690 4691 return (PF_PASS); 4692 } 4693 4694 int 4695 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, 4696 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason) 4697 { 4698 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 4699 u_int16_t icmpid = 0, *icmpsum; 4700 u_int8_t icmptype; 4701 int state_icmp = 0; 4702 struct pf_state_key_cmp key; 4703 4704 switch (pd->proto) { 4705 #ifdef INET 4706 case IPPROTO_ICMP: 4707 icmptype = pd->hdr.icmp->icmp_type; 4708 icmpid = pd->hdr.icmp->icmp_id; 4709 icmpsum = &pd->hdr.icmp->icmp_cksum; 4710 4711 if (icmptype == ICMP_UNREACH || 4712 icmptype == ICMP_SOURCEQUENCH || 4713 icmptype == ICMP_REDIRECT || 4714 icmptype == ICMP_TIMXCEED || 4715 icmptype == ICMP_PARAMPROB) 4716 state_icmp++; 4717 break; 4718 #endif /* INET */ 4719 #ifdef INET6 4720 case IPPROTO_ICMPV6: 4721 icmptype = pd->hdr.icmp6->icmp6_type; 4722 icmpid = pd->hdr.icmp6->icmp6_id; 4723 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 4724 4725 if (icmptype == ICMP6_DST_UNREACH || 4726 icmptype == ICMP6_PACKET_TOO_BIG || 4727 icmptype == ICMP6_TIME_EXCEEDED || 4728 icmptype == ICMP6_PARAM_PROB) 4729 state_icmp++; 4730 break; 4731 #endif /* INET6 */ 4732 } 4733 4734 if (!state_icmp) { 4735 4736 /* 4737 * ICMP query/reply message not related to a TCP/UDP packet. 4738 * Search for an ICMP state. 4739 */ 4740 key.af = pd->af; 4741 key.proto = pd->proto; 4742 key.port[0] = key.port[1] = icmpid; 4743 if (direction == PF_IN) { /* wire side, straight */ 4744 PF_ACPY(&key.addr[0], pd->src, key.af); 4745 PF_ACPY(&key.addr[1], pd->dst, key.af); 4746 } else { /* stack side, reverse */ 4747 PF_ACPY(&key.addr[1], pd->src, key.af); 4748 PF_ACPY(&key.addr[0], pd->dst, key.af); 4749 } 4750 4751 STATE_LOOKUP(kif, &key, direction, *state, m); 4752 4753 (*state)->expire = time_second; 4754 (*state)->timeout = PFTM_ICMP_ERROR_REPLY; 4755 4756 /* translate source/destination address, if necessary */ 4757 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4758 struct pf_state_key *nk = (*state)->key[pd->didx]; 4759 4760 switch (pd->af) { 4761 #ifdef INET 4762 case AF_INET: 4763 if (PF_ANEQ(pd->src, 4764 &nk->addr[pd->sidx], AF_INET)) 4765 pf_change_a(&saddr->v4.s_addr, 4766 pd->ip_sum, 4767 nk->addr[pd->sidx].v4.s_addr, 0); 4768 4769 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], 4770 AF_INET)) 4771 pf_change_a(&daddr->v4.s_addr, 4772 pd->ip_sum, 4773 nk->addr[pd->didx].v4.s_addr, 0); 4774 4775 if (nk->port[0] != 4776 pd->hdr.icmp->icmp_id) { 4777 pd->hdr.icmp->icmp_cksum = 4778 pf_cksum_fixup( 4779 pd->hdr.icmp->icmp_cksum, icmpid, 4780 nk->port[pd->sidx], 0); 4781 pd->hdr.icmp->icmp_id = 4782 nk->port[pd->sidx]; 4783 } 4784 4785 m_copyback(m, off, ICMP_MINLEN, 4786 (caddr_t)pd->hdr.icmp); 4787 break; 4788 #endif /* INET */ 4789 #ifdef INET6 4790 case AF_INET6: 4791 if (PF_ANEQ(pd->src, 4792 &nk->addr[pd->sidx], AF_INET6)) 4793 pf_change_a6(saddr, 4794 &pd->hdr.icmp6->icmp6_cksum, 4795 &nk->addr[pd->sidx], 0); 4796 4797 if (PF_ANEQ(pd->dst, 4798 &nk->addr[pd->didx], AF_INET6)) 4799 pf_change_a6(daddr, 4800 &pd->hdr.icmp6->icmp6_cksum, 4801 &nk->addr[pd->didx], 0); 4802 4803 m_copyback(m, off, 4804 sizeof(struct icmp6_hdr), 4805 (caddr_t)pd->hdr.icmp6); 4806 break; 4807 #endif /* INET6 */ 4808 } 4809 } 4810 return (PF_PASS); 4811 4812 } 
else { 4813 /* 4814 * ICMP error message in response to a TCP/UDP packet. 4815 * Extract the inner TCP/UDP header and search for that state. 4816 */ 4817 4818 struct pf_pdesc pd2; 4819 #ifdef INET 4820 struct ip h2; 4821 #endif /* INET */ 4822 #ifdef INET6 4823 struct ip6_hdr h2_6; 4824 int terminal = 0; 4825 #endif /* INET6 */ 4826 int ipoff2; 4827 int off2; 4828 4829 pd2.af = pd->af; 4830 /* Payload packet is from the opposite direction. */ 4831 pd2.sidx = (direction == PF_IN) ? 1 : 0; 4832 pd2.didx = (direction == PF_IN) ? 0 : 1; 4833 switch (pd->af) { 4834 #ifdef INET 4835 case AF_INET: 4836 /* offset of h2 in mbuf chain */ 4837 ipoff2 = off + ICMP_MINLEN; 4838 4839 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 4840 NULL, reason, pd2.af)) { 4841 DPFPRINTF(PF_DEBUG_MISC, 4842 ("pf: ICMP error message too short " 4843 "(ip)\n")); 4844 return (PF_DROP); 4845 } 4846 /* 4847 * ICMP error messages don't refer to non-first 4848 * fragments 4849 */ 4850 if (h2.ip_off & htons(IP_OFFMASK)) { 4851 REASON_SET(reason, PFRES_FRAG); 4852 return (PF_DROP); 4853 } 4854 4855 /* offset of protocol header that follows h2 */ 4856 off2 = ipoff2 + (h2.ip_hl << 2); 4857 4858 pd2.proto = h2.ip_p; 4859 pd2.src = (struct pf_addr *)&h2.ip_src; 4860 pd2.dst = (struct pf_addr *)&h2.ip_dst; 4861 pd2.ip_sum = &h2.ip_sum; 4862 break; 4863 #endif /* INET */ 4864 #ifdef INET6 4865 case AF_INET6: 4866 ipoff2 = off + sizeof(struct icmp6_hdr); 4867 4868 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 4869 NULL, reason, pd2.af)) { 4870 DPFPRINTF(PF_DEBUG_MISC, 4871 ("pf: ICMP error message too short " 4872 "(ip6)\n")); 4873 return (PF_DROP); 4874 } 4875 pd2.proto = h2_6.ip6_nxt; 4876 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 4877 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 4878 pd2.ip_sum = NULL; 4879 off2 = ipoff2 + sizeof(h2_6); 4880 do { 4881 switch (pd2.proto) { 4882 case IPPROTO_FRAGMENT: 4883 /* 4884 * ICMPv6 error messages for 4885 * non-first fragments 4886 */ 4887 REASON_SET(reason, PFRES_FRAG); 4888 return (PF_DROP); 4889 case IPPROTO_AH: 4890 case IPPROTO_HOPOPTS: 4891 case IPPROTO_ROUTING: 4892 case IPPROTO_DSTOPTS: { 4893 /* get next header and header length */ 4894 struct ip6_ext opt6; 4895 4896 if (!pf_pull_hdr(m, off2, &opt6, 4897 sizeof(opt6), NULL, reason, 4898 pd2.af)) { 4899 DPFPRINTF(PF_DEBUG_MISC, 4900 ("pf: ICMPv6 short opt\n")); 4901 return (PF_DROP); 4902 } 4903 if (pd2.proto == IPPROTO_AH) 4904 off2 += (opt6.ip6e_len + 2) * 4; 4905 else 4906 off2 += (opt6.ip6e_len + 1) * 8; 4907 pd2.proto = opt6.ip6e_nxt; 4908 /* goto the next header */ 4909 break; 4910 } 4911 default: 4912 terminal++; 4913 break; 4914 } 4915 } while (!terminal); 4916 break; 4917 #endif /* INET6 */ 4918 default: 4919 DPFPRINTF(PF_DEBUG_MISC, 4920 ("pf: ICMP AF %d unknown (ip6)\n", pd->af)); 4921 return (PF_DROP); 4922 break; 4923 } 4924 4925 switch (pd2.proto) { 4926 case IPPROTO_TCP: { 4927 struct tcphdr th; 4928 u_int32_t seq; 4929 struct pf_state_peer *src, *dst; 4930 u_int8_t dws; 4931 int copyback = 0; 4932 4933 /* 4934 * Only the first 8 bytes of the TCP header can be 4935 * expected. Don't access any TCP header fields after 4936 * th_seq, an ackskew test is not possible. 
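* (RFC 792 only obliges the sender of an ICMP error to quote the IP header plus the first 64 bits of the offending datagram, which for TCP covers just the ports and th_seq.)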
4937 */ 4938 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, 4939 pd2.af)) { 4940 DPFPRINTF(PF_DEBUG_MISC, 4941 ("pf: ICMP error message too short " 4942 "(tcp)\n")); 4943 return (PF_DROP); 4944 } 4945 4946 key.af = pd2.af; 4947 key.proto = IPPROTO_TCP; 4948 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4949 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4950 key.port[pd2.sidx] = th.th_sport; 4951 key.port[pd2.didx] = th.th_dport; 4952 4953 STATE_LOOKUP(kif, &key, direction, *state, m); 4954 4955 if (direction == (*state)->direction) { 4956 src = &(*state)->dst; 4957 dst = &(*state)->src; 4958 } else { 4959 src = &(*state)->src; 4960 dst = &(*state)->dst; 4961 } 4962 4963 if (src->wscale && dst->wscale) 4964 dws = dst->wscale & PF_WSCALE_MASK; 4965 else 4966 dws = 0; 4967 4968 /* Demodulate sequence number */ 4969 seq = ntohl(th.th_seq) - src->seqdiff; 4970 if (src->seqdiff) { 4971 pf_change_a(&th.th_seq, icmpsum, 4972 htonl(seq), 0); 4973 copyback = 1; 4974 } 4975 4976 if (!((*state)->state_flags & PFSTATE_SLOPPY) && 4977 (!SEQ_GEQ(src->seqhi, seq) || 4978 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { 4979 if (pf_status.debug >= PF_DEBUG_MISC) { 4980 kprintf("pf: BAD ICMP %d:%d ", 4981 icmptype, pd->hdr.icmp->icmp_code); 4982 pf_print_host(pd->src, 0, pd->af); 4983 kprintf(" -> "); 4984 pf_print_host(pd->dst, 0, pd->af); 4985 kprintf(" state: "); 4986 pf_print_state(*state); 4987 kprintf(" seq=%u\n", seq); 4988 } 4989 REASON_SET(reason, PFRES_BADSTATE); 4990 return (PF_DROP); 4991 } else { 4992 if (pf_status.debug >= PF_DEBUG_MISC) { 4993 kprintf("pf: OK ICMP %d:%d ", 4994 icmptype, pd->hdr.icmp->icmp_code); 4995 pf_print_host(pd->src, 0, pd->af); 4996 kprintf(" -> "); 4997 pf_print_host(pd->dst, 0, pd->af); 4998 kprintf(" state: "); 4999 pf_print_state(*state); 5000 kprintf(" seq=%u\n", seq); 5001 } 5002 } 5003 5004 /* translate source/destination address, if necessary */ 5005 if ((*state)->key[PF_SK_WIRE] != 5006 (*state)->key[PF_SK_STACK]) { 5007 struct pf_state_key *nk = 5008 (*state)->key[pd->didx]; 5009 5010 if (PF_ANEQ(pd2.src, 5011 &nk->addr[pd2.sidx], pd2.af) || 5012 nk->port[pd2.sidx] != th.th_sport) 5013 pf_change_icmp(pd2.src, &th.th_sport, 5014 daddr, &nk->addr[pd2.sidx], 5015 nk->port[pd2.sidx], NULL, 5016 pd2.ip_sum, icmpsum, 5017 pd->ip_sum, 0, pd2.af); 5018 5019 if (PF_ANEQ(pd2.dst, 5020 &nk->addr[pd2.didx], pd2.af) || 5021 nk->port[pd2.didx] != th.th_dport) 5022 pf_change_icmp(pd2.dst, &th.th_dport, 5023 NULL, /* XXX Inbound NAT? 
*/ 5024 &nk->addr[pd2.didx], 5025 nk->port[pd2.didx], NULL, 5026 pd2.ip_sum, icmpsum, 5027 pd->ip_sum, 0, pd2.af); 5028 copyback = 1; 5029 } 5030 5031 if (copyback) { 5032 switch (pd2.af) { 5033 #ifdef INET 5034 case AF_INET: 5035 m_copyback(m, off, ICMP_MINLEN, 5036 (caddr_t)pd->hdr.icmp); 5037 m_copyback(m, ipoff2, sizeof(h2), 5038 (caddr_t)&h2); 5039 break; 5040 #endif /* INET */ 5041 #ifdef INET6 5042 case AF_INET6: 5043 m_copyback(m, off, 5044 sizeof(struct icmp6_hdr), 5045 (caddr_t)pd->hdr.icmp6); 5046 m_copyback(m, ipoff2, sizeof(h2_6), 5047 (caddr_t)&h2_6); 5048 break; 5049 #endif /* INET6 */ 5050 } 5051 m_copyback(m, off2, 8, (caddr_t)&th); 5052 } 5053 5054 return (PF_PASS); 5055 break; 5056 } 5057 case IPPROTO_UDP: { 5058 struct udphdr uh; 5059 5060 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 5061 NULL, reason, pd2.af)) { 5062 DPFPRINTF(PF_DEBUG_MISC, 5063 ("pf: ICMP error message too short " 5064 "(udp)\n")); 5065 return (PF_DROP); 5066 } 5067 5068 key.af = pd2.af; 5069 key.proto = IPPROTO_UDP; 5070 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5071 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5072 key.port[pd2.sidx] = uh.uh_sport; 5073 key.port[pd2.didx] = uh.uh_dport; 5074 5075 STATE_LOOKUP(kif, &key, direction, *state, m); 5076 5077 /* translate source/destination address, if necessary */ 5078 if ((*state)->key[PF_SK_WIRE] != 5079 (*state)->key[PF_SK_STACK]) { 5080 struct pf_state_key *nk = 5081 (*state)->key[pd->didx]; 5082 5083 if (PF_ANEQ(pd2.src, 5084 &nk->addr[pd2.sidx], pd2.af) || 5085 nk->port[pd2.sidx] != uh.uh_sport) 5086 pf_change_icmp(pd2.src, &uh.uh_sport, 5087 daddr, &nk->addr[pd2.sidx], 5088 nk->port[pd2.sidx], &uh.uh_sum, 5089 pd2.ip_sum, icmpsum, 5090 pd->ip_sum, 1, pd2.af); 5091 5092 if (PF_ANEQ(pd2.dst, 5093 &nk->addr[pd2.didx], pd2.af) || 5094 nk->port[pd2.didx] != uh.uh_dport) 5095 pf_change_icmp(pd2.dst, &uh.uh_dport, 5096 NULL, /* XXX Inbound NAT? 
*/
5097 &nk->addr[pd2.didx],
5098 nk->port[pd2.didx], &uh.uh_sum,
5099 pd2.ip_sum, icmpsum,
5100 pd->ip_sum, 1, pd2.af);
5101
5102 switch (pd2.af) {
5103 #ifdef INET
5104 case AF_INET:
5105 m_copyback(m, off, ICMP_MINLEN,
5106 (caddr_t)pd->hdr.icmp);
5107 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5108 break;
5109 #endif /* INET */
5110 #ifdef INET6
5111 case AF_INET6:
5112 m_copyback(m, off,
5113 sizeof(struct icmp6_hdr),
5114 (caddr_t)pd->hdr.icmp6);
5115 m_copyback(m, ipoff2, sizeof(h2_6),
5116 (caddr_t)&h2_6);
5117 break;
5118 #endif /* INET6 */
5119 }
5120 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
5121 }
5122
5123 return (PF_PASS);
5124 break;
5125 }
5126 #ifdef INET
5127 case IPPROTO_ICMP: {
5128 struct icmp iih;
5129
5130 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
5131 NULL, reason, pd2.af)) {
5132 DPFPRINTF(PF_DEBUG_MISC,
5133 ("pf: ICMP error message too short "
5134 "(icmp)\n"));
5135 return (PF_DROP);
5136 }
5137
5138 key.af = pd2.af;
5139 key.proto = IPPROTO_ICMP;
5140 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5141 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5142 key.port[0] = key.port[1] = iih.icmp_id;
5143
5144 STATE_LOOKUP(kif, &key, direction, *state, m);
5145
5146 /* translate source/destination address, if necessary */
5147 if ((*state)->key[PF_SK_WIRE] !=
5148 (*state)->key[PF_SK_STACK]) {
5149 struct pf_state_key *nk =
5150 (*state)->key[pd->didx];
5151
5152 if (PF_ANEQ(pd2.src,
5153 &nk->addr[pd2.sidx], pd2.af) ||
5154 nk->port[pd2.sidx] != iih.icmp_id)
5155 pf_change_icmp(pd2.src, &iih.icmp_id,
5156 daddr, &nk->addr[pd2.sidx],
5157 nk->port[pd2.sidx], NULL,
5158 pd2.ip_sum, icmpsum,
5159 pd->ip_sum, 0, AF_INET);
5160
5161 if (PF_ANEQ(pd2.dst,
5162 &nk->addr[pd2.didx], pd2.af) ||
5163 nk->port[pd2.didx] != iih.icmp_id)
5164 pf_change_icmp(pd2.dst, &iih.icmp_id,
5165 NULL, /* XXX Inbound NAT? */
5166 &nk->addr[pd2.didx],
5167 nk->port[pd2.didx], NULL,
5168 pd2.ip_sum, icmpsum,
5169 pd->ip_sum, 0, AF_INET);
5170
5171 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
5172 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5173 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
5174 }
5175 return (PF_PASS);
5176 break;
5177 }
5178 #endif /* INET */
5179 #ifdef INET6
5180 case IPPROTO_ICMPV6: {
5181 struct icmp6_hdr iih;
5182
5183 if (!pf_pull_hdr(m, off2, &iih,
5184 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
5185 DPFPRINTF(PF_DEBUG_MISC,
5186 ("pf: ICMP error message too short "
5187 "(icmp6)\n"));
5188 return (PF_DROP);
5189 }
5190
5191 key.af = pd2.af;
5192 key.proto = IPPROTO_ICMPV6;
5193 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5194 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5195 key.port[0] = key.port[1] = iih.icmp6_id;
5196
5197 STATE_LOOKUP(kif, &key, direction, *state, m);
5198
5199 /* translate source/destination address, if necessary */
5200 if ((*state)->key[PF_SK_WIRE] !=
5201 (*state)->key[PF_SK_STACK]) {
5202 struct pf_state_key *nk =
5203 (*state)->key[pd->didx];
5204
5205 if (PF_ANEQ(pd2.src,
5206 &nk->addr[pd2.sidx], pd2.af) ||
5207 nk->port[pd2.sidx] != iih.icmp6_id)
5208 pf_change_icmp(pd2.src, &iih.icmp6_id,
5209 daddr, &nk->addr[pd2.sidx],
5210 nk->port[pd2.sidx], NULL,
5211 pd2.ip_sum, icmpsum,
5212 pd->ip_sum, 0, AF_INET6);
5213
5214 if (PF_ANEQ(pd2.dst,
5215 &nk->addr[pd2.didx], pd2.af) ||
5216 nk->port[pd2.didx] != iih.icmp6_id)
5217 pf_change_icmp(pd2.dst, &iih.icmp6_id,
5218 NULL, /* XXX Inbound NAT?
*/ 5219 &nk->addr[pd2.didx], 5220 nk->port[pd2.didx], NULL, 5221 pd2.ip_sum, icmpsum, 5222 pd->ip_sum, 0, AF_INET6); 5223 5224 m_copyback(m, off, sizeof(struct icmp6_hdr), 5225 (caddr_t)pd->hdr.icmp6); 5226 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); 5227 m_copyback(m, off2, sizeof(struct icmp6_hdr), 5228 (caddr_t)&iih); 5229 } 5230 5231 return (PF_PASS); 5232 break; 5233 } 5234 #endif /* INET6 */ 5235 default: { 5236 key.af = pd2.af; 5237 key.proto = pd2.proto; 5238 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5239 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5240 key.port[0] = key.port[1] = 0; 5241 5242 STATE_LOOKUP(kif, &key, direction, *state, m); 5243 5244 /* translate source/destination address, if necessary */ 5245 if ((*state)->key[PF_SK_WIRE] != 5246 (*state)->key[PF_SK_STACK]) { 5247 struct pf_state_key *nk = 5248 (*state)->key[pd->didx]; 5249 5250 if (PF_ANEQ(pd2.src, 5251 &nk->addr[pd2.sidx], pd2.af)) 5252 pf_change_icmp(pd2.src, NULL, daddr, 5253 &nk->addr[pd2.sidx], 0, NULL, 5254 pd2.ip_sum, icmpsum, 5255 pd->ip_sum, 0, pd2.af); 5256 5257 if (PF_ANEQ(pd2.dst, 5258 &nk->addr[pd2.didx], pd2.af)) 5259 pf_change_icmp(pd2.src, NULL, 5260 NULL, /* XXX Inbound NAT? */ 5261 &nk->addr[pd2.didx], 0, NULL, 5262 pd2.ip_sum, icmpsum, 5263 pd->ip_sum, 0, pd2.af); 5264 5265 switch (pd2.af) { 5266 #ifdef INET 5267 case AF_INET: 5268 m_copyback(m, off, ICMP_MINLEN, 5269 (caddr_t)pd->hdr.icmp); 5270 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5271 break; 5272 #endif /* INET */ 5273 #ifdef INET6 5274 case AF_INET6: 5275 m_copyback(m, off, 5276 sizeof(struct icmp6_hdr), 5277 (caddr_t)pd->hdr.icmp6); 5278 m_copyback(m, ipoff2, sizeof(h2_6), 5279 (caddr_t)&h2_6); 5280 break; 5281 #endif /* INET6 */ 5282 } 5283 } 5284 return (PF_PASS); 5285 break; 5286 } 5287 } 5288 } 5289 } 5290 5291 int 5292 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 5293 struct mbuf *m, struct pf_pdesc *pd) 5294 { 5295 struct pf_state_peer *src, *dst; 5296 struct pf_state_key_cmp key; 5297 5298 key.af = pd->af; 5299 key.proto = pd->proto; 5300 if (direction == PF_IN) { 5301 PF_ACPY(&key.addr[0], pd->src, key.af); 5302 PF_ACPY(&key.addr[1], pd->dst, key.af); 5303 key.port[0] = key.port[1] = 0; 5304 } else { 5305 PF_ACPY(&key.addr[1], pd->src, key.af); 5306 PF_ACPY(&key.addr[0], pd->dst, key.af); 5307 key.port[1] = key.port[0] = 0; 5308 } 5309 5310 STATE_LOOKUP(kif, &key, direction, *state, m); 5311 5312 if (direction == (*state)->direction) { 5313 src = &(*state)->src; 5314 dst = &(*state)->dst; 5315 } else { 5316 src = &(*state)->dst; 5317 dst = &(*state)->src; 5318 } 5319 5320 /* update states */ 5321 if (src->state < PFOTHERS_SINGLE) 5322 src->state = PFOTHERS_SINGLE; 5323 if (dst->state == PFOTHERS_SINGLE) 5324 dst->state = PFOTHERS_MULTIPLE; 5325 5326 /* update expire time */ 5327 (*state)->expire = time_second; 5328 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) 5329 (*state)->timeout = PFTM_OTHER_MULTIPLE; 5330 else 5331 (*state)->timeout = PFTM_OTHER_SINGLE; 5332 5333 /* translate source/destination address, if necessary */ 5334 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 5335 struct pf_state_key *nk = (*state)->key[pd->didx]; 5336 5337 KKASSERT(nk); 5338 KKASSERT(pd); 5339 KKASSERT(pd->src); 5340 KKASSERT(pd->dst); 5341 switch (pd->af) { 5342 #ifdef INET 5343 case AF_INET: 5344 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) 5345 pf_change_a(&pd->src->v4.s_addr, 5346 pd->ip_sum, 5347 nk->addr[pd->sidx].v4.s_addr, 5348 
0);
5349
5350
5351 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5352 pf_change_a(&pd->dst->v4.s_addr,
5353 pd->ip_sum,
5354 nk->addr[pd->didx].v4.s_addr,
5355 0);
5356
5357 break;
5358 #endif /* INET */
5359 #ifdef INET6
5360 case AF_INET6:
5361 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5362 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5363
5364 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5365 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5366 #endif /* INET6 */
5367 }
5368 }
5369 return (PF_PASS);
5370 }
5371
5372 /*
5373 * ipoff and off are measured from the start of the mbuf chain.
5374 * h must be at "ipoff" on the mbuf chain.
5375 */
5376 void *
5377 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5378 u_short *actionp, u_short *reasonp, sa_family_t af)
5379 {
5380 switch (af) {
5381 #ifdef INET
5382 case AF_INET: {
5383 struct ip *h = mtod(m, struct ip *);
5384 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3;
5385
5386 if (fragoff) {
5387 if (fragoff >= len)
5388 ACTION_SET(actionp, PF_PASS);
5389 else {
5390 ACTION_SET(actionp, PF_DROP);
5391 REASON_SET(reasonp, PFRES_FRAG);
5392 }
5393 return (NULL);
5394 }
5395 if (m->m_pkthdr.len < off + len ||
5396 h->ip_len < off + len) {
5397 ACTION_SET(actionp, PF_DROP);
5398 REASON_SET(reasonp, PFRES_SHORT);
5399 return (NULL);
5400 }
5401 break;
5402 }
5403 #endif /* INET */
5404 #ifdef INET6
5405 case AF_INET6: {
5406 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
5407
5408 if (m->m_pkthdr.len < off + len ||
5409 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5410 (unsigned)(off + len)) {
5411 ACTION_SET(actionp, PF_DROP);
5412 REASON_SET(reasonp, PFRES_SHORT);
5413 return (NULL);
5414 }
5415 break;
5416 }
5417 #endif /* INET6 */
5418 }
5419 m_copydata(m, off, len, p);
5420 return (p);
5421 }
5422
5423 int
5424 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
5425 {
5426 struct sockaddr_in *dst;
5427 int ret = 1;
5428 int check_mpath;
5429 #ifdef INET6
5430 struct sockaddr_in6 *dst6;
5431 struct route_in6 ro;
5432 #else
5433 struct route ro;
5434 #endif
5435 struct radix_node *rn;
5436 struct rtentry *rt;
5437 struct ifnet *ifp;
5438
5439 check_mpath = 0;
5440 bzero(&ro, sizeof(ro));
5441 switch (af) {
5442 case AF_INET:
5443 dst = satosin(&ro.ro_dst);
5444 dst->sin_family = AF_INET;
5445 dst->sin_len = sizeof(*dst);
5446 dst->sin_addr = addr->v4;
5447 break;
5448 #ifdef INET6
5449 case AF_INET6:
5450 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5451 dst6->sin6_family = AF_INET6;
5452 dst6->sin6_len = sizeof(*dst6);
5453 dst6->sin6_addr = addr->v6;
5454 break;
5455 #endif /* INET6 */
5456 default:
5457 return (0);
5458 }
5459
5460 /* Skip checks for ipsec interfaces */
5461 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5462 goto out;
5463
5464 rtalloc_ign((struct route *)&ro, 0);
5465
5466 if (ro.ro_rt != NULL) {
5467 /* No interface given, this is a no-route check */
5468 if (kif == NULL)
5469 goto out;
5470
5471 if (kif->pfik_ifp == NULL) {
5472 ret = 0;
5473 goto out;
5474 }
5475
5476 /* Perform uRPF check if passed input interface */
5477 ret = 0;
5478 rn = (struct radix_node *)ro.ro_rt;
5479 do {
5480 rt = (struct rtentry *)rn;
5481 ifp = rt->rt_ifp;
5482
5483 if (kif->pfik_ifp == ifp)
5484 ret = 1;
5485 rn = NULL;
5486 } while (check_mpath == 1 && rn != NULL && ret == 0);
5487 } else
5488 ret = 0;
5489 out:
5490 if (ro.ro_rt != NULL)
5491 RTFREE(ro.ro_rt);
5492 return (ret);
5493 }
5494
5495 int
5496 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct
pf_addr_wrap *aw) 5497 { 5498 struct sockaddr_in *dst; 5499 #ifdef INET6 5500 struct sockaddr_in6 *dst6; 5501 struct route_in6 ro; 5502 #else 5503 struct route ro; 5504 #endif 5505 int ret = 0; 5506 5507 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5508 5509 bzero(&ro, sizeof(ro)); 5510 switch (af) { 5511 case AF_INET: 5512 dst = satosin(&ro.ro_dst); 5513 dst->sin_family = AF_INET; 5514 dst->sin_len = sizeof(*dst); 5515 dst->sin_addr = addr->v4; 5516 break; 5517 #ifdef INET6 5518 case AF_INET6: 5519 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5520 dst6->sin6_family = AF_INET6; 5521 dst6->sin6_len = sizeof(*dst6); 5522 dst6->sin6_addr = addr->v6; 5523 break; 5524 #endif /* INET6 */ 5525 default: 5526 return (0); 5527 } 5528 5529 rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING)); 5530 5531 if (ro.ro_rt != NULL) { 5532 RTFREE(ro.ro_rt); 5533 } 5534 5535 return (ret); 5536 } 5537 5538 #ifdef INET 5539 void 5540 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5541 struct pf_state *s, struct pf_pdesc *pd) 5542 { 5543 struct mbuf *m0, *m1; 5544 struct route iproute; 5545 struct route *ro = NULL; 5546 struct sockaddr_in *dst; 5547 struct ip *ip; 5548 struct ifnet *ifp = NULL; 5549 struct pf_addr naddr; 5550 struct pf_src_node *sn = NULL; 5551 int error = 0; 5552 int sw_csum; 5553 #ifdef IPSEC 5554 struct m_tag *mtag; 5555 #endif /* IPSEC */ 5556 5557 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5558 5559 if (m == NULL || *m == NULL || r == NULL || 5560 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5561 panic("pf_route: invalid parameters"); 5562 5563 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5564 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5565 (*m)->m_pkthdr.pf.routed = 1; 5566 } else { 5567 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5568 m0 = *m; 5569 *m = NULL; 5570 goto bad; 5571 } 5572 } 5573 5574 if (r->rt == PF_DUPTO) { 5575 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) { 5576 return; 5577 } 5578 } else { 5579 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5580 return; 5581 } 5582 m0 = *m; 5583 } 5584 5585 if (m0->m_len < sizeof(struct ip)) { 5586 DPFPRINTF(PF_DEBUG_URGENT, 5587 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5588 goto bad; 5589 } 5590 5591 ip = mtod(m0, struct ip *); 5592 5593 ro = &iproute; 5594 bzero((caddr_t)ro, sizeof(*ro)); 5595 dst = satosin(&ro->ro_dst); 5596 dst->sin_family = AF_INET; 5597 dst->sin_len = sizeof(*dst); 5598 dst->sin_addr = ip->ip_dst; 5599 5600 if (r->rt == PF_FASTROUTE) { 5601 rtalloc(ro); 5602 if (ro->ro_rt == 0) { 5603 ipstat.ips_noroute++; 5604 goto bad; 5605 } 5606 5607 ifp = ro->ro_rt->rt_ifp; 5608 ro->ro_rt->rt_use++; 5609 5610 if (ro->ro_rt->rt_flags & RTF_GATEWAY) 5611 dst = satosin(ro->ro_rt->rt_gateway); 5612 } else { 5613 if (TAILQ_EMPTY(&r->rpool.list)) { 5614 DPFPRINTF(PF_DEBUG_URGENT, 5615 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n")); 5616 goto bad; 5617 } 5618 if (s == NULL) { 5619 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, 5620 &naddr, NULL, &sn); 5621 if (!PF_AZERO(&naddr, AF_INET)) 5622 dst->sin_addr.s_addr = naddr.v4.s_addr; 5623 ifp = r->rpool.cur->kif ? 5624 r->rpool.cur->kif->pfik_ifp : NULL; 5625 } else { 5626 if (!PF_AZERO(&s->rt_addr, AF_INET)) 5627 dst->sin_addr.s_addr = 5628 s->rt_addr.v4.s_addr; 5629 ifp = s->rt_kif ? 
s->rt_kif->pfik_ifp : NULL; 5630 } 5631 } 5632 if (ifp == NULL) 5633 goto bad; 5634 5635 if (oifp != ifp) { 5636 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5637 goto bad; 5638 } else if (m0 == NULL) { 5639 goto done; 5640 } 5641 if (m0->m_len < sizeof(struct ip)) { 5642 DPFPRINTF(PF_DEBUG_URGENT, 5643 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5644 goto bad; 5645 } 5646 ip = mtod(m0, struct ip *); 5647 } 5648 5649 /* Copied from FreeBSD 5.1-CURRENT ip_output. */ 5650 m0->m_pkthdr.csum_flags |= CSUM_IP; 5651 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist; 5652 if (sw_csum & CSUM_DELAY_DATA) { 5653 in_delayed_cksum(m0); 5654 sw_csum &= ~CSUM_DELAY_DATA; 5655 } 5656 m0->m_pkthdr.csum_flags &= ifp->if_hwassist; 5657 m0->m_pkthdr.csum_iphlen = (ip->ip_hl << 2); 5658 5659 if (ip->ip_len <= ifp->if_mtu || 5660 (ifp->if_hwassist & CSUM_FRAGMENT && 5661 (ip->ip_off & IP_DF) == 0)) { 5662 ip->ip_len = htons(ip->ip_len); 5663 ip->ip_off = htons(ip->ip_off); 5664 ip->ip_sum = 0; 5665 if (sw_csum & CSUM_DELAY_IP) { 5666 /* From KAME */ 5667 if (ip->ip_v == IPVERSION && 5668 (ip->ip_hl << 2) == sizeof(*ip)) { 5669 ip->ip_sum = in_cksum_hdr(ip); 5670 } else { 5671 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); 5672 } 5673 } 5674 lwkt_reltoken(&pf_token); 5675 error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt); 5676 lwkt_gettoken(&pf_token); 5677 goto done; 5678 } 5679 5680 /* 5681 * Too large for interface; fragment if possible. 5682 * Must be able to put at least 8 bytes per fragment. 5683 */ 5684 if (ip->ip_off & IP_DF) { 5685 ipstat.ips_cantfrag++; 5686 if (r->rt != PF_DUPTO) { 5687 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, 5688 ifp->if_mtu); 5689 goto done; 5690 } else 5691 goto bad; 5692 } 5693 5694 m1 = m0; 5695 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum); 5696 if (error) { 5697 goto bad; 5698 } 5699 5700 for (m0 = m1; m0; m0 = m1) { 5701 m1 = m0->m_nextpkt; 5702 m0->m_nextpkt = 0; 5703 if (error == 0) { 5704 lwkt_reltoken(&pf_token); 5705 error = (*ifp->if_output)(ifp, m0, sintosa(dst), 5706 NULL); 5707 lwkt_gettoken(&pf_token); 5708 } else 5709 m_freem(m0); 5710 } 5711 5712 if (error == 0) 5713 ipstat.ips_fragmented++; 5714 5715 done: 5716 if (r->rt != PF_DUPTO) 5717 *m = NULL; 5718 if (ro == &iproute && ro->ro_rt) 5719 RTFREE(ro->ro_rt); 5720 return; 5721 5722 bad: 5723 m_freem(m0); 5724 goto done; 5725 } 5726 #endif /* INET */ 5727 5728 #ifdef INET6 5729 void 5730 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5731 struct pf_state *s, struct pf_pdesc *pd) 5732 { 5733 struct mbuf *m0; 5734 struct route_in6 ip6route; 5735 struct route_in6 *ro; 5736 struct sockaddr_in6 *dst; 5737 struct ip6_hdr *ip6; 5738 struct ifnet *ifp = NULL; 5739 struct pf_addr naddr; 5740 struct pf_src_node *sn = NULL; 5741 5742 if (m == NULL || *m == NULL || r == NULL || 5743 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5744 panic("pf_route6: invalid parameters"); 5745 5746 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5747 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5748 (*m)->m_pkthdr.pf.routed = 1; 5749 } else { 5750 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5751 m0 = *m; 5752 *m = NULL; 5753 goto bad; 5754 } 5755 } 5756 5757 if (r->rt == PF_DUPTO) { 5758 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) 5759 return; 5760 } else { 5761 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) 5762 return; 5763 m0 = *m; 5764 } 5765 5766 if (m0->m_len < sizeof(struct ip6_hdr)) { 5767 DPFPRINTF(PF_DEBUG_URGENT, 5768 ("pf_route6: 
m0->m_len < sizeof(struct ip6_hdr)\n")); 5769 goto bad; 5770 } 5771 ip6 = mtod(m0, struct ip6_hdr *); 5772 5773 ro = &ip6route; 5774 bzero((caddr_t)ro, sizeof(*ro)); 5775 dst = (struct sockaddr_in6 *)&ro->ro_dst; 5776 dst->sin6_family = AF_INET6; 5777 dst->sin6_len = sizeof(*dst); 5778 dst->sin6_addr = ip6->ip6_dst; 5779 5780 /* 5781 * DragonFly doesn't zero the auxillary pkghdr fields, only fw_flags, 5782 * so make sure pf.flags is clear. 5783 * 5784 * Cheat. XXX why only in the v6 case??? 5785 */ 5786 if (r->rt == PF_FASTROUTE) { 5787 m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 5788 m0->m_pkthdr.pf.flags = 0; 5789 /* XXX Re-Check when Upgrading to > 4.4 */ 5790 m0->m_pkthdr.pf.statekey = NULL; 5791 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 5792 return; 5793 } 5794 5795 if (TAILQ_EMPTY(&r->rpool.list)) { 5796 DPFPRINTF(PF_DEBUG_URGENT, 5797 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n")); 5798 goto bad; 5799 } 5800 if (s == NULL) { 5801 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src, 5802 &naddr, NULL, &sn); 5803 if (!PF_AZERO(&naddr, AF_INET6)) 5804 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 5805 &naddr, AF_INET6); 5806 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; 5807 } else { 5808 if (!PF_AZERO(&s->rt_addr, AF_INET6)) 5809 PF_ACPY((struct pf_addr *)&dst->sin6_addr, 5810 &s->rt_addr, AF_INET6); 5811 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 5812 } 5813 if (ifp == NULL) 5814 goto bad; 5815 5816 if (oifp != ifp) { 5817 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5818 goto bad; 5819 } else if (m0 == NULL) { 5820 goto done; 5821 } 5822 if (m0->m_len < sizeof(struct ip6_hdr)) { 5823 DPFPRINTF(PF_DEBUG_URGENT, 5824 ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n")); 5825 goto bad; 5826 } 5827 ip6 = mtod(m0, struct ip6_hdr *); 5828 } 5829 5830 /* 5831 * If the packet is too large for the outgoing interface, 5832 * send back an icmp6 error. 5833 */ 5834 if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr)) 5835 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index); 5836 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) { 5837 nd6_output(ifp, ifp, m0, dst, NULL); 5838 } else { 5839 in6_ifstat_inc(ifp, ifs6_in_toobig); 5840 if (r->rt != PF_DUPTO) 5841 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); 5842 else 5843 goto bad; 5844 } 5845 5846 done: 5847 if (r->rt != PF_DUPTO) 5848 *m = NULL; 5849 return; 5850 5851 bad: 5852 m_freem(m0); 5853 goto done; 5854 } 5855 #endif /* INET6 */ 5856 5857 5858 /* 5859 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag 5860 * off is the offset where the protocol header starts 5861 * len is the total length of protocol header plus payload 5862 * returns 0 when the checksum is valid, otherwise returns 1. 5863 */ 5864 /* 5865 * XXX 5866 * FreeBSD supports cksum offload for the following drivers. 5867 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4) 5868 * If we can make full use of it we would outperform ipfw/ipfilter in 5869 * very heavy traffic. 5870 * I have not tested 'cause I don't have NICs that supports cksum offload. 5871 * (There might be problems. Typical phenomena would be 5872 * 1. No route message for UDP packet. 5873 * 2. No connection acceptance from external hosts regardless of rule set.) 
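* Until that is verified, pf_check_proto_cksum() below only trusts a
* NIC-supplied sum when the driver has set CSUM_DATA_VALID, and
* otherwise falls back to an in_cksum()-style software verification.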
5874 */ 5875 int 5876 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, 5877 sa_family_t af) 5878 { 5879 u_int16_t sum = 0; 5880 int hw_assist = 0; 5881 struct ip *ip; 5882 5883 if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) 5884 return (1); 5885 if (m->m_pkthdr.len < off + len) 5886 return (1); 5887 5888 switch (p) { 5889 case IPPROTO_TCP: 5890 case IPPROTO_UDP: 5891 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 5892 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 5893 sum = m->m_pkthdr.csum_data; 5894 } else { 5895 ip = mtod(m, struct ip *); 5896 sum = in_pseudo(ip->ip_src.s_addr, 5897 ip->ip_dst.s_addr, htonl((u_short)len + 5898 m->m_pkthdr.csum_data + p)); 5899 } 5900 sum ^= 0xffff; 5901 ++hw_assist; 5902 } 5903 break; 5904 case IPPROTO_ICMP: 5905 #ifdef INET6 5906 case IPPROTO_ICMPV6: 5907 #endif /* INET6 */ 5908 break; 5909 default: 5910 return (1); 5911 } 5912 5913 if (!hw_assist) { 5914 switch (af) { 5915 case AF_INET: 5916 if (p == IPPROTO_ICMP) { 5917 if (m->m_len < off) 5918 return (1); 5919 m->m_data += off; 5920 m->m_len -= off; 5921 sum = in_cksum(m, len); 5922 m->m_data -= off; 5923 m->m_len += off; 5924 } else { 5925 if (m->m_len < sizeof(struct ip)) 5926 return (1); 5927 sum = in_cksum_range(m, p, off, len); 5928 if (sum == 0) { 5929 m->m_pkthdr.csum_flags |= 5930 (CSUM_DATA_VALID | 5931 CSUM_PSEUDO_HDR); 5932 m->m_pkthdr.csum_data = 0xffff; 5933 } 5934 } 5935 break; 5936 #ifdef INET6 5937 case AF_INET6: 5938 if (m->m_len < sizeof(struct ip6_hdr)) 5939 return (1); 5940 sum = in6_cksum(m, p, off, len); 5941 /* 5942 * XXX 5943 * IPv6 H/W cksum off-load not supported yet! 5944 * 5945 * if (sum == 0) { 5946 * m->m_pkthdr.csum_flags |= 5947 * (CSUM_DATA_VALID|CSUM_PSEUDO_HDR); 5948 * m->m_pkthdr.csum_data = 0xffff; 5949 *} 5950 */ 5951 break; 5952 #endif /* INET6 */ 5953 default: 5954 return (1); 5955 } 5956 } 5957 if (sum) { 5958 switch (p) { 5959 case IPPROTO_TCP: 5960 tcpstat.tcps_rcvbadsum++; 5961 break; 5962 case IPPROTO_UDP: 5963 udp_stat.udps_badsum++; 5964 break; 5965 case IPPROTO_ICMP: 5966 icmpstat.icps_checksum++; 5967 break; 5968 #ifdef INET6 5969 case IPPROTO_ICMPV6: 5970 icmp6stat.icp6s_checksum++; 5971 break; 5972 #endif /* INET6 */ 5973 } 5974 return (1); 5975 } 5976 return (0); 5977 } 5978 5979 struct pf_divert * 5980 pf_find_divert(struct mbuf *m) 5981 { 5982 struct m_tag *mtag; 5983 5984 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) 5985 return (NULL); 5986 5987 return ((struct pf_divert *)(mtag + 1)); 5988 } 5989 5990 struct pf_divert * 5991 pf_get_divert(struct mbuf *m) 5992 { 5993 struct m_tag *mtag; 5994 5995 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) { 5996 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert), 5997 M_NOWAIT); 5998 if (mtag == NULL) 5999 return (NULL); 6000 bzero(mtag + 1, sizeof(struct pf_divert)); 6001 m_tag_prepend(m, mtag); 6002 } 6003 6004 return ((struct pf_divert *)(mtag + 1)); 6005 } 6006 6007 #ifdef INET 6008 6009 /* 6010 * WARNING: pf_token held shared on entry, THIS IS CPU LOCALIZED CODE 6011 */ 6012 int 6013 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, 6014 struct ether_header *eh, struct inpcb *inp) 6015 { 6016 struct pfi_kif *kif; 6017 u_short action, reason = 0, log = 0; 6018 struct mbuf *m = *m0; 6019 struct ip *h = NULL; 6020 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; 6021 struct pf_state *s = NULL; 6022 struct pf_ruleset *ruleset = NULL; 6023 struct pf_pdesc pd; 6024 int off, dirndx; 6025 #ifdef ALTQ 6026 int pqid = 
0;
6027 #endif
6028
6029 if (!pf_status.running)
6030 return (PF_PASS);
6031
6032 memset(&pd, 0, sizeof(pd));
6033 #ifdef foo
6034 if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
6035 kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
6036 else
6037 #endif
6038 kif = (struct pfi_kif *)ifp->if_pf_kif;
6039
6040 if (kif == NULL) {
6041 DPFPRINTF(PF_DEBUG_URGENT,
6042 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6043 return (PF_DROP);
6044 }
6045 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6046 return (PF_PASS);
6047
6048 #ifdef DIAGNOSTIC
6049 if ((m->m_flags & M_PKTHDR) == 0)
6050 panic("non-M_PKTHDR is passed to pf_test");
6051 #endif /* DIAGNOSTIC */
6052
6053 if (m->m_pkthdr.len < (int)sizeof(*h)) {
6054 action = PF_DROP;
6055 REASON_SET(&reason, PFRES_SHORT);
6056 log = 1;
6057 goto done;
6058 }
6059
6060 /*
6061 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
6062 * so make sure pf.flags is clear.
6063 */
6064 if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
6065 return (PF_PASS);
6066 m->m_pkthdr.pf.flags = 0;
6067 /* Re-Check when updating to > 4.4 */
6068 m->m_pkthdr.pf.statekey = NULL;
6069
6070 /* We do IP header normalization and packet reassembly here */
6071 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
6072 action = PF_DROP;
6073 goto done;
6074 }
6075 m = *m0; /* pf_normalize messes with m0 */
6076 h = mtod(m, struct ip *);
6077
6078 off = h->ip_hl << 2;
6079 if (off < (int)sizeof(*h)) {
6080 action = PF_DROP;
6081 REASON_SET(&reason, PFRES_SHORT);
6082 log = 1;
6083 goto done;
6084 }
6085
6086 pd.src = (struct pf_addr *)&h->ip_src;
6087 pd.dst = (struct pf_addr *)&h->ip_dst;
6088 pd.sport = pd.dport = NULL;
6089 pd.ip_sum = &h->ip_sum;
6090 pd.proto_sum = NULL;
6091 pd.proto = h->ip_p;
6092 pd.dir = dir;
6093 pd.sidx = (dir == PF_IN) ? 0 : 1;
6094 pd.didx = (dir == PF_IN) ?
1 : 0; 6095 pd.af = AF_INET; 6096 pd.tos = h->ip_tos; 6097 pd.tot_len = h->ip_len; 6098 pd.eh = eh; 6099 6100 /* handle fragments that didn't get reassembled by normalization */ 6101 if (h->ip_off & (IP_MF | IP_OFFMASK)) { 6102 action = pf_test_fragment(&r, dir, kif, m, h, 6103 &pd, &a, &ruleset); 6104 goto done; 6105 } 6106 6107 switch (h->ip_p) { 6108 6109 case IPPROTO_TCP: { 6110 struct tcphdr th; 6111 6112 pd.hdr.tcp = &th; 6113 if (!pf_pull_hdr(m, off, &th, sizeof(th), 6114 &action, &reason, AF_INET)) { 6115 log = action != PF_PASS; 6116 goto done; 6117 } 6118 pd.p_len = pd.tot_len - off - (th.th_off << 2); 6119 #ifdef ALTQ 6120 if ((th.th_flags & TH_ACK) && pd.p_len == 0) 6121 pqid = 1; 6122 #endif 6123 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 6124 if (action == PF_DROP) 6125 goto done; 6126 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 6127 &reason); 6128 if (action == PF_PASS) { 6129 pfsync_update_state(s); 6130 r = s->rule.ptr; 6131 a = s->anchor.ptr; 6132 log = s->log; 6133 } else if (s == NULL) 6134 action = pf_test_rule(&r, &s, dir, kif, 6135 m, off, h, &pd, &a, &ruleset, NULL, inp); 6136 break; 6137 } 6138 6139 case IPPROTO_UDP: { 6140 struct udphdr uh; 6141 6142 pd.hdr.udp = &uh; 6143 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 6144 &action, &reason, AF_INET)) { 6145 log = action != PF_PASS; 6146 goto done; 6147 } 6148 if (uh.uh_dport == 0 || 6149 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 6150 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 6151 action = PF_DROP; 6152 REASON_SET(&reason, PFRES_SHORT); 6153 goto done; 6154 } 6155 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 6156 if (action == PF_PASS) { 6157 pfsync_update_state(s); 6158 r = s->rule.ptr; 6159 a = s->anchor.ptr; 6160 log = s->log; 6161 } else if (s == NULL) 6162 action = pf_test_rule(&r, &s, dir, kif, 6163 m, off, h, &pd, &a, &ruleset, NULL, inp); 6164 break; 6165 } 6166 6167 case IPPROTO_ICMP: { 6168 struct icmp ih; 6169 6170 pd.hdr.icmp = &ih; 6171 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN, 6172 &action, &reason, AF_INET)) { 6173 log = action != PF_PASS; 6174 goto done; 6175 } 6176 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, 6177 &reason); 6178 if (action == PF_PASS) { 6179 pfsync_update_state(s); 6180 r = s->rule.ptr; 6181 a = s->anchor.ptr; 6182 log = s->log; 6183 } else if (s == NULL) 6184 action = pf_test_rule(&r, &s, dir, kif, 6185 m, off, h, &pd, &a, &ruleset, NULL, inp); 6186 break; 6187 } 6188 6189 default: 6190 action = pf_test_state_other(&s, dir, kif, m, &pd); 6191 if (action == PF_PASS) { 6192 pfsync_update_state(s); 6193 r = s->rule.ptr; 6194 a = s->anchor.ptr; 6195 log = s->log; 6196 } else if (s == NULL) 6197 action = pf_test_rule(&r, &s, dir, kif, m, off, h, 6198 &pd, &a, &ruleset, NULL, inp); 6199 break; 6200 } 6201 6202 done: 6203 if (action == PF_PASS && h->ip_hl > 5 && 6204 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 6205 action = PF_DROP; 6206 REASON_SET(&reason, PFRES_IPOPTIONS); 6207 log = 1; 6208 DPFPRINTF(PF_DEBUG_MISC, 6209 ("pf: dropping packet with ip options\n")); 6210 } 6211 6212 if ((s && s->tag) || r->rtableid) 6213 pf_tag_packet(m, s ? 
done:
	if (action == PF_PASS && h->ip_hl > 5 &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with ip options\n"));
	}

	if ((s && s->tag) || r->rtableid)
		pf_tag_packet(m, s ? s->tag : 0, r->rtableid);

#if 0
	if (dir == PF_IN && s && s->key[PF_SK_STACK])
		m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#endif

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		if (pqid || (pd.tos & IPTOS_LOWDELAY))
			m->m_pkthdr.pf.qid = r->pqid;
		else
			m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = AF_INET;
		m->m_pkthdr.pf.hdr = h;
		/* add connection hash for fairq */
		if (s) {
			m->m_pkthdr.pf.state_hash = s->hash;
			m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
		}
	}
#endif /* ALTQ */

	/*
	 * connections redirected to loopback should not match sockets
	 * bound specifically to loopback due to security implications,
	 * see tcp_input() and in_pcblookup_listen().
	 */
	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
		m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (dir == PF_IN && action == PF_PASS && r->divert.port) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(m))) {
			m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->port = r->divert.port;
			divert->addr.ipv4 = r->divert.addr.v4;
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_OUT)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_IN)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}
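
	/*
	 * PF_SYNPROXY_DROP means the packet was consumed by the synproxy
	 * code (e.g. answered with a proxied SYN+ACK), so the mbuf is
	 * freed here and PF_PASS is returned to tell the caller there is
	 * nothing left to process.
	 */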
	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route can free the mbuf causing *m0 to become NULL */
		pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET */

#ifdef INET6

/*
 * WARNING: pf_token held shared on entry, THIS IS CPU LOCALIZED CODE
 */
int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct inpcb *inp)
{
	struct pfi_kif *kif;
	u_short action, reason = 0, log = 0;
	struct mbuf *m = *m0, *n = NULL;
	struct ip6_hdr *h = NULL;
	struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state *s = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_pdesc pd;
	int off, terminal = 0, dirndx, rh_cnt = 0;

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
#ifdef foo
	if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
		kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
	else
#endif
	kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test6");
#endif /* DIAGNOSTIC */

	if (m->m_pkthdr.len < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
		return (PF_PASS);
	m->m_pkthdr.pf.flags = 0;
	/* Re-Check when updating to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip6_hdr *);

#if 1
	/*
	 * we do not support jumbograms yet.  if we keep going, a zero
	 * ip6_plen will do something bad, so drop the packet for now.
	 */
	if (htons(h->ip6_plen) == 0) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
		goto done;
	}
#endif

	pd.src = (struct pf_addr *)&h->ip6_src;
	pd.dst = (struct pf_addr *)&h->ip6_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = NULL;
	pd.proto_sum = NULL;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET6;
	pd.tos = 0;
	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	pd.eh = eh;
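
	/*
	 * Walk the chain of IPv6 extension headers until a terminal
	 * (transport) protocol is found.  Fragments are handed off to
	 * pf_test_fragment(), at most one routing header is accepted,
	 * and a type 0 routing header is always rejected.
	 */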
	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
	pd.proto = h->ip6_nxt;
	do {
		switch (pd.proto) {
		case IPPROTO_FRAGMENT:
			action = pf_test_fragment(&r, dir, kif, m, h,
			    &pd, &a, &ruleset);
			if (action == PF_DROP)
				REASON_SET(&reason, PFRES_FRAG);
			goto done;
		case IPPROTO_ROUTING: {
			struct ip6_rthdr rthdr;

			if (rh_cnt++) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 more than one rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
			    &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				log = 1;
				goto done;
			}
			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 rthdr0\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			/* FALLTHROUGH */
		}
		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext opt6;

			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH)
				off += (opt6.ip6e_len + 2) * 4;
			else
				off += (opt6.ip6e_len + 1) * 8;
			pd.proto = opt6.ip6e_nxt;
			/* go to the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);

	/* if there's no routing header, use unmodified mbuf for checksumming */
	if (!n)
		n = m;
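
	/*
	 * Per-protocol dispatch on the terminal header, following the
	 * same state-first pattern as the IPv4 path in pf_test() above.
	 */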
	switch (pd.proto) {

	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	case IPPROTO_ICMPV6: {
		struct icmp6_hdr ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif,
		    m, off, h, &pd, &reason);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, NULL, inp);
		break;
	}

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			pfsync_update_state(s);
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, NULL, inp);
		break;
	}

done:
	if (n != m) {
		m_freem(n);
		n = NULL;
	}

	/* handle dangerous IPv6 extension headers. */
	if (action == PF_PASS && rh_cnt &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with dangerous v6 headers\n"));
	}

	if ((s && s->tag) || r->rtableid)
		pf_tag_packet(m, s ? s->tag : 0, r->rtableid);

#if 0
	if (dir == PF_IN && s && s->key[PF_SK_STACK])
		m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#endif

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		if (pd.tos & IPTOS_LOWDELAY)
			m->m_pkthdr.pf.qid = r->pqid;
		else
			m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = AF_INET6;
		m->m_pkthdr.pf.hdr = h;
		if (s) {
			/* for fairq */
			m->m_pkthdr.pf.state_hash = s->hash;
			m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
		}
	}
#endif /* ALTQ */

	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
		m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (dir == PF_IN && action == PF_PASS && r->divert.port) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(m))) {
			m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->port = r->divert.port;
			divert->addr.ipv6 = r->divert.addr.v6;
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
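
	/*
	 * Account the packet against every object involved in the
	 * decision: the matching rule, its anchor, the state and its
	 * source nodes, and any tables referenced by the rule (or by
	 * the NAT rule when only the default rule matched).
	 */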
	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->addr[0],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->addr[1],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route6 can free the mbuf causing *m0 to become NULL */
		pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET6 */

int
pf_check_congestion(struct ifqueue *ifq)
{
	return (0);
}