/* $OpenBSD: pf.c,v 1.614 2008/08/02 12:34:37 henning Exp $ */

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/spinlock.h>

#include <machine/inttypes.h>

#include <sys/md5.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/netisr2.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <netinet/if_ether.h>

#include <net/pf/pfvar.h>
#include <net/pf/if_pflog.h>

#include <net/pf/if_pfsync.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <sys/in_cksum.h>
#include <sys/ucred.h>
#include <machine/limits.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <net/netmsg2.h>
#include <net/toeplitz2.h>

extern int ip_optcopy(struct ip *, struct ip *);
extern int debug_pfugidhack;

/*
 * pf_token  - shared lock for cpu-localized operations,
 *             exclusive lock otherwise.
 *
 * pf_gtoken - exclusive lock used for initialization.
 *
 * pf_spin   - only used to atomically fetch and increment stateid
 *             on 32-bit systems.
 */
struct lwkt_token pf_token = LWKT_TOKEN_INITIALIZER(pf_token);
struct lwkt_token pf_gtoken = LWKT_TOKEN_INITIALIZER(pf_gtoken);
#if __SIZEOF_LONG__ != 8
struct spinlock pf_spin = SPINLOCK_INITIALIZER(pf_spin);
#endif

#define DPFPRINTF(n, x)	if (pf_status.debug >= (n)) kprintf x

#define FAIL(code)	{ error = (code); goto done; }

/*
 * Global variables
 */

/* mask radix tree */
struct radix_node_head	*pf_maskhead;

/* state tables */
struct pf_state_tree	 pf_statetbl[MAXCPU+1]; /* incls one global table */

struct pf_altqqueue	 pf_altqs[2];
struct pf_palist	 pf_pabuf;
struct pf_altqqueue	*pf_altqs_active;
struct pf_altqqueue	*pf_altqs_inactive;
struct pf_status	 pf_status;

u_int32_t		 ticket_altqs_active;
u_int32_t		 ticket_altqs_inactive;
int			 altqs_inactive_open;
u_int32_t		 ticket_pabuf;

MD5_CTX			 pf_tcp_secret_ctx;
u_char			 pf_tcp_secret[16];
int			 pf_tcp_secret_init;
int			 pf_tcp_iss_off;

struct pf_anchor_stackframe {
    struct pf_ruleset	*rs;
    struct pf_rule	*r;
    struct pf_anchor_node *parent;
    struct pf_anchor	*child;
} pf_anchor_stack[64];

struct malloc_type	*pf_src_tree_pl, *pf_rule_pl, *pf_pooladdr_pl;
struct malloc_type	*pf_state_pl, *pf_state_key_pl, *pf_state_item_pl;
struct malloc_type	*pf_altq_pl;

void		 pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
		    u_int32_t);
void		 pf_add_threshold(struct pf_threshold *);
int		 pf_check_threshold(struct pf_threshold *);

void		 pf_change_ap(struct pf_addr *, u_int16_t *,
		    u_int16_t *, u_int16_t *, struct pf_addr *,
		    u_int16_t, u_int8_t, sa_family_t);
int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
		    struct tcphdr *, struct pf_state_peer *);
#ifdef INET6
void		 pf_change_a6(struct pf_addr *, u_int16_t *,
		    struct pf_addr *, u_int8_t);
#endif /* INET6 */
void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
		    struct pf_addr *, struct pf_addr *, u_int16_t,
		    u_int16_t *, u_int16_t *, u_int16_t *,
		    u_int16_t *, u_int8_t, sa_family_t);
void		 pf_send_tcp(const struct pf_rule *, sa_family_t,
		    const struct pf_addr *, const struct pf_addr *,
		    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
		    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
		    u_int16_t, struct ether_header *, struct ifnet *);
void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
		    sa_family_t, struct pf_rule *);
struct pf_rule	*pf_match_translation(struct pf_pdesc *, struct mbuf *,
		    int, int, struct pfi_kif *,
		    struct pf_addr *, u_int16_t, struct pf_addr *,
		    u_int16_t, int);
struct pf_rule	*pf_get_translation(struct pf_pdesc *, struct mbuf *,
		    int, int, struct pfi_kif *, struct pf_src_node **,
		    struct pf_state_key **, struct pf_state_key **,
		    struct pf_state_key **, struct pf_state_key **,
		    struct pf_addr *, struct pf_addr *,
		    u_int16_t, u_int16_t);
void		 pf_detach_state(struct pf_state *);
int		 pf_state_key_setup(struct pf_pdesc *, struct pf_rule *,
		    struct pf_state_key **, struct pf_state_key **,
		    struct pf_state_key **, struct pf_state_key **,
		    struct pf_addr *, struct pf_addr *,
		    u_int16_t, u_int16_t);
void		 pf_state_key_detach(struct pf_state *, int);
u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
int		 pf_test_rule(struct pf_rule **, struct pf_state **,
		    int, struct pfi_kif *, struct mbuf *, int,
		    void *, struct pf_pdesc *, struct pf_rule **,
		    struct pf_ruleset **, struct ifqueue *, struct inpcb *);
static __inline int pf_create_state(struct pf_rule *, struct pf_rule *,
		    struct pf_rule *, struct pf_pdesc *,
		    struct pf_src_node *, struct pf_state_key *,
		    struct pf_state_key *, struct pf_state_key *,
		    struct pf_state_key *, struct mbuf *, int,
		    u_int16_t, u_int16_t, int *, struct pfi_kif *,
		    struct pf_state **, int, u_int16_t, u_int16_t,
		    int);
int		 pf_test_fragment(struct pf_rule **, int,
		    struct pfi_kif *, struct mbuf *, void *,
		    struct pf_pdesc *, struct pf_rule **,
		    struct pf_ruleset **);
int		 pf_tcp_track_full(struct pf_state_peer *,
		    struct pf_state_peer *, struct pf_state **,
		    struct pfi_kif *, struct mbuf *, int,
		    struct pf_pdesc *, u_short *, int *);
int		 pf_tcp_track_sloppy(struct pf_state_peer *,
		    struct pf_state_peer *, struct pf_state **,
		    struct pf_pdesc *, u_short *);
int		 pf_test_state_tcp(struct pf_state **, int,
		    struct pfi_kif *, struct mbuf *, int,
		    void *, struct pf_pdesc *, u_short *);
int		 pf_test_state_udp(struct pf_state **, int,
		    struct pfi_kif *, struct mbuf *, int,
		    void *, struct pf_pdesc *);
int		 pf_test_state_icmp(struct pf_state **, int,
		    struct pfi_kif *, struct mbuf *, int,
		    void *, struct pf_pdesc *, u_short *);
int		 pf_test_state_other(struct pf_state **, int,
		    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
void		 pf_step_into_anchor(int *, struct pf_ruleset **, int,
		    struct pf_rule **, struct pf_rule **, int *);
int		 pf_step_out_of_anchor(int *, struct pf_ruleset **,
		    int, struct pf_rule **, struct pf_rule **,
		    int *);
void		 pf_hash(struct pf_addr *, struct pf_addr *,
		    struct pf_poolhashkey *, sa_family_t);
int		 pf_map_addr(u_int8_t, struct pf_rule *,
		    struct pf_addr *, struct pf_addr *,
		    struct pf_addr *, struct pf_src_node **);
int		 pf_get_sport(struct pf_pdesc *,
		    sa_family_t, u_int8_t, struct pf_rule *,
		    struct pf_addr *, struct pf_addr *,
		    u_int16_t, u_int16_t,
		    struct pf_addr *, u_int16_t *,
		    u_int16_t, u_int16_t,
		    struct pf_src_node **);
void		 pf_route(struct mbuf **, struct pf_rule *, int,
		    struct ifnet *, struct pf_state *,
		    struct pf_pdesc *);
void		 pf_route6(struct mbuf **, struct pf_rule *, int,
		    struct ifnet *, struct pf_state *,
		    struct pf_pdesc *);
u_int8_t	 pf_get_wscale(struct mbuf *, int, u_int16_t,
		    sa_family_t);
u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
		    sa_family_t);
u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
		    u_int16_t);
void		 pf_set_rt_ifp(struct pf_state *,
		    struct pf_addr *);
int		 pf_check_proto_cksum(struct mbuf *, int, int,
		    u_int8_t, sa_family_t);
struct pf_divert *pf_get_divert(struct mbuf *);
void		 pf_print_state_parts(struct pf_state *,
		    struct pf_state_key *, struct pf_state_key *);
int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
		    struct pf_addr_wrap *);
struct pf_state	*pf_find_state(struct pfi_kif *,
		    struct pf_state_key_cmp *, u_int, struct mbuf *);
int		 pf_src_connlimit(struct pf_state *);
int		 pf_check_congestion(struct ifqueue *);

extern int pf_end_threads;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
    { &pf_state_pl,	PFSTATE_HIWAT },
    { &pf_src_tree_pl,	PFSNODE_HIWAT },
    { &pf_frent_pl,	PFFRAG_FRENT_HIWAT },
    { &pfr_ktable_pl,	PFR_KTABLE_HIWAT },
    { &pfr_kentry_pl,	PFR_KENTRY_HIWAT }
};
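/*
 * Look up the state for the current packet and short-circuit the caller
 * as appropriate: drop if no state exists (or the state is already being
 * purged), and pass outright on the outbound path when the state is bound
 * to a route-to/reply-to interface other than the one the packet is on.
 */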
#define STATE_LOOKUP(i, k, d, s, m)				\
    do {							\
	s = pf_find_state(i, k, d, m);				\
	if (s == NULL || (s)->timeout == PFTM_PURGE)		\
	    return (PF_DROP);					\
	if (d == PF_OUT &&					\
	    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
	    (s)->rule.ptr->direction == PF_OUT) ||		\
	    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
	    (s)->rule.ptr->direction == PF_IN)) &&		\
	    (s)->rt_kif != NULL &&				\
	    (s)->rt_kif != i)					\
	    return (PF_PASS);					\
    } while (0)

#define BOUND_IFACE(r, k) \
    ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

#define STATE_INC_COUNTERS(s)					\
    do {							\
	atomic_add_int(&s->rule.ptr->states_cur, 1);		\
	s->rule.ptr->states_tot++;				\
	if (s->anchor.ptr != NULL) {				\
	    atomic_add_int(&s->anchor.ptr->states_cur, 1);	\
	    s->anchor.ptr->states_tot++;			\
	}							\
	if (s->nat_rule.ptr != NULL) {				\
	    atomic_add_int(&s->nat_rule.ptr->states_cur, 1);	\
	    s->nat_rule.ptr->states_tot++;			\
	}							\
    } while (0)

#define STATE_DEC_COUNTERS(s)					\
    do {							\
	if (s->nat_rule.ptr != NULL)				\
	    atomic_add_int(&s->nat_rule.ptr->states_cur, -1);	\
	if (s->anchor.ptr != NULL)				\
	    atomic_add_int(&s->anchor.ptr->states_cur, -1);	\
	atomic_add_int(&s->rule.ptr->states_cur, -1);		\
    } while (0)

static MALLOC_DEFINE(M_PFSTATEPL, "pfstatepl", "pf state pool list");
static MALLOC_DEFINE(M_PFSRCTREEPL, "pfsrctpl", "pf source tree pool list");
static MALLOC_DEFINE(M_PFSTATEKEYPL, "pfstatekeypl", "pf state key pool list");
static MALLOC_DEFINE(M_PFSTATEITEMPL, "pfstateitempl", "pf state item pool list");

static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
static __inline int pf_state_compare_key(struct pf_state_key *,
    struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
    struct pf_state *);

struct pf_src_tree tree_src_tracking[MAXCPU];
struct pf_state_tree_id tree_id[MAXCPU];
struct pf_state_queue state_list[MAXCPU];

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
RB_GENERATE(pf_state_tree_id, pf_state, entry_id, pf_state_compare_id);

static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
    int diff;

    if (a->rule.ptr > b->rule.ptr)
	return (1);
    if (a->rule.ptr < b->rule.ptr)
	return (-1);
    if ((diff = a->af - b->af) != 0)
	return (diff);
    switch (a->af) {
#ifdef INET
    case AF_INET:
	if (a->addr.addr32[0] > b->addr.addr32[0])
	    return (1);
	if (a->addr.addr32[0] < b->addr.addr32[0])
	    return (-1);
	break;
#endif /* INET */
#ifdef INET6
    case AF_INET6:
	if (a->addr.addr32[3] > b->addr.addr32[3])
	    return (1);
	if (a->addr.addr32[3] < b->addr.addr32[3])
	    return (-1);
	if (a->addr.addr32[2] > b->addr.addr32[2])
	    return (1);
	if (a->addr.addr32[2] < b->addr.addr32[2])
	    return (-1);
	if (a->addr.addr32[1] > b->addr.addr32[1])
	    return (1);
	if (a->addr.addr32[1] < b->addr.addr32[1])
	    return (-1);
	if (a->addr.addr32[0] > b->addr.addr32[0])
	    return (1);
	if (a->addr.addr32[0] < b->addr.addr32[0])
	    return (-1);
	break;
#endif /* INET6 */
    }
    return (0);
}
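/*
 * Derive a hash from the kernel address of the pf_state_key structure
 * itself; the addresses and ports stored inside the key are not hashed.
 * A result of 0 is remapped to 1 ("disallow 0").
 */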
u_int32_t
pf_state_hash(struct pf_state_key *sk)
{
    u_int32_t hv = (u_int32_t)(((intptr_t)sk >> 6) ^ ((intptr_t)sk >> 15));

    if (hv == 0)	/* disallow 0 */
	hv = 1;
    return(hv);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
    switch (af) {
#ifdef INET
    case AF_INET:
	dst->addr32[0] = src->addr32[0];
	break;
#endif /* INET */
    case AF_INET6:
	dst->addr32[0] = src->addr32[0];
	dst->addr32[1] = src->addr32[1];
	dst->addr32[2] = src->addr32[2];
	dst->addr32[3] = src->addr32[3];
	break;
    }
}
#endif /* INET6 */
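/*
 * Connection-rate thresholds (used by max-src-conn-rate tracking).
 * Counts are stored scaled by PF_THRESHOLD_MULT so the linear decay in
 * pf_add_threshold() works in integer arithmetic: on each update the
 * count is aged by count * diff / seconds (cleared entirely once a full
 * interval has passed) before the new connection is added.  E.g. with
 * seconds = 10, a 5-second quiet gap halves the accumulated count.
 */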
void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
    threshold->limit = limit * PF_THRESHOLD_MULT;
    threshold->seconds = seconds;
    threshold->count = 0;
    threshold->last = time_second;
}

void
pf_add_threshold(struct pf_threshold *threshold)
{
    u_int32_t t = time_second, diff = t - threshold->last;

    if (diff >= threshold->seconds)
	threshold->count = 0;
    else
	threshold->count -= threshold->count * diff /
	    threshold->seconds;
    threshold->count += PF_THRESHOLD_MULT;
    threshold->last = t;
}

int
pf_check_threshold(struct pf_threshold *threshold)
{
    return (threshold->count > threshold->limit);
}
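/*
 * Enforce the per-source connection limits (max-src-conn and
 * max-src-conn-rate).  Returns 1 if a limit was exceeded, in which case
 * the offending state is marked for purging and, if an overload table is
 * configured, the source address is inserted into it (optionally flushing
 * existing states from that source).  Returns 0 otherwise.
 */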
int
pf_src_connlimit(struct pf_state *state)
{
    int bad = 0;
    int cpu = mycpu->gd_cpuid;

    state->src_node->conn++;
    state->src.tcp_est = 1;
    pf_add_threshold(&state->src_node->conn_rate);

    if (state->rule.ptr->max_src_conn &&
	state->rule.ptr->max_src_conn < state->src_node->conn) {
	pf_status.lcounters[LCNT_SRCCONN]++;
	bad++;
    }

    if (state->rule.ptr->max_src_conn_rate.limit &&
	pf_check_threshold(&state->src_node->conn_rate)) {
	pf_status.lcounters[LCNT_SRCCONNRATE]++;
	bad++;
    }

    if (!bad)
	return 0;

    if (state->rule.ptr->overload_tbl) {
	struct pfr_addr p;
	u_int32_t killed = 0;

	pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
	if (pf_status.debug >= PF_DEBUG_MISC) {
	    kprintf("pf_src_connlimit: blocking address ");
	    pf_print_host(&state->src_node->addr, 0,
		state->key[PF_SK_WIRE]->af);
	}

	bzero(&p, sizeof(p));
	p.pfra_af = state->key[PF_SK_WIRE]->af;
	switch (state->key[PF_SK_WIRE]->af) {
#ifdef INET
	case AF_INET:
	    p.pfra_net = 32;
	    p.pfra_ip4addr = state->src_node->addr.v4;
	    break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    p.pfra_net = 128;
	    p.pfra_ip6addr = state->src_node->addr.v6;
	    break;
#endif /* INET6 */
	}

	pfr_insert_kentry(state->rule.ptr->overload_tbl, &p, time_second);

	/* kill existing states if that's required. */
	if (state->rule.ptr->flush) {
	    struct pf_state_key *sk;
	    struct pf_state *st;

	    pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
	    RB_FOREACH(st, pf_state_tree_id, &tree_id[cpu]) {
		sk = st->key[PF_SK_WIRE];
		/*
		 * Kill states from this source.  (Only those
		 * from the same rule if PF_FLUSH_GLOBAL is not
		 * set).  (Only on current cpu).
		 */
		if (sk->af == state->key[PF_SK_WIRE]->af &&
		    ((state->direction == PF_OUT &&
		    PF_AEQ(&state->src_node->addr,
			&sk->addr[0], sk->af)) ||
		    (state->direction == PF_IN &&
		    PF_AEQ(&state->src_node->addr,
			&sk->addr[1], sk->af))) &&
		    (state->rule.ptr->flush & PF_FLUSH_GLOBAL ||
		    state->rule.ptr == st->rule.ptr)) {
		    st->timeout = PFTM_PURGE;
		    st->src.state = st->dst.state = TCPS_CLOSED;
		    killed++;
		}
	    }
	    if (pf_status.debug >= PF_DEBUG_MISC)
		kprintf(", %u states killed", killed);
	}
	if (pf_status.debug >= PF_DEBUG_MISC)
	    kprintf("\n");
    }

    /* kill this state */
    state->timeout = PFTM_PURGE;
    state->src.state = state->dst.state = TCPS_CLOSED;

    return 1;
}
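/*
 * Find or create the source tracking node for this rule/address pair.
 * Returns 0 on success with *sn set, or -1 if the node could not be
 * allocated, could not be inserted, or a per-rule source limit
 * (max-src-nodes, max-src-states) has already been reached.
 */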
int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{
    struct pf_src_node k;
    int cpu = mycpu->gd_cpuid;

    if (*sn == NULL) {
	k.af = af;
	PF_ACPY(&k.addr, src, af);
	if (rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR)
	    k.rule.ptr = rule;
	else
	    k.rule.ptr = NULL;
	pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
	*sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k);
    }
    if (*sn == NULL) {
	if (!rule->max_src_nodes ||
	    rule->src_nodes < rule->max_src_nodes)
	    (*sn) = kmalloc(sizeof(struct pf_src_node),
		M_PFSRCTREEPL, M_NOWAIT|M_ZERO);
	else
	    pf_status.lcounters[LCNT_SRCNODES]++;
	if ((*sn) == NULL)
	    return (-1);

	pf_init_threshold(&(*sn)->conn_rate,
	    rule->max_src_conn_rate.limit,
	    rule->max_src_conn_rate.seconds);

	(*sn)->af = af;
	if (rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR)
	    (*sn)->rule.ptr = rule;
	else
	    (*sn)->rule.ptr = NULL;
	PF_ACPY(&(*sn)->addr, src, af);
	if (RB_INSERT(pf_src_tree,
	    &tree_src_tracking[cpu], *sn) != NULL) {
	    if (pf_status.debug >= PF_DEBUG_MISC) {
		kprintf("pf: src_tree insert failed: ");
		pf_print_host(&(*sn)->addr, 0, af);
		kprintf("\n");
	    }
	    kfree(*sn, M_PFSRCTREEPL);
	    return (-1);
	}

	/*
	 * Atomic op required to increment src_nodes in the rule
	 * because we hold a shared token here (decrements will use
	 * an exclusive token).
	 */
	(*sn)->creation = time_second;
	(*sn)->ruletype = rule->action;
	if ((*sn)->rule.ptr != NULL)
	    atomic_add_int(&(*sn)->rule.ptr->src_nodes, 1);
	pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
	atomic_add_int(&pf_status.src_nodes, 1);
    } else {
	if (rule->max_src_states &&
	    (*sn)->states >= rule->max_src_states) {
	    pf_status.lcounters[LCNT_SRCSTATES]++;
	    return (-1);
	}
    }
    return (0);
}

/* state table stuff */

static __inline int
pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
{
    int diff;

    if ((diff = a->proto - b->proto) != 0)
	return (diff);
    if ((diff = a->af - b->af) != 0)
	return (diff);
    switch (a->af) {
#ifdef INET
    case AF_INET:
	if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
	    return (1);
	if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
	    return (-1);
	if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
	    return (1);
	if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
	    return (-1);
	break;
#endif /* INET */
#ifdef INET6
    case AF_INET6:
	if (a->addr[0].addr32[3] > b->addr[0].addr32[3])
	    return (1);
	if (a->addr[0].addr32[3] < b->addr[0].addr32[3])
	    return (-1);
	if (a->addr[1].addr32[3] > b->addr[1].addr32[3])
	    return (1);
	if (a->addr[1].addr32[3] < b->addr[1].addr32[3])
	    return (-1);
	if (a->addr[0].addr32[2] > b->addr[0].addr32[2])
	    return (1);
	if (a->addr[0].addr32[2] < b->addr[0].addr32[2])
	    return (-1);
	if (a->addr[1].addr32[2] > b->addr[1].addr32[2])
	    return (1);
	if (a->addr[1].addr32[2] < b->addr[1].addr32[2])
	    return (-1);
	if (a->addr[0].addr32[1] > b->addr[0].addr32[1])
	    return (1);
	if (a->addr[0].addr32[1] < b->addr[0].addr32[1])
	    return (-1);
	if (a->addr[1].addr32[1] > b->addr[1].addr32[1])
	    return (1);
	if (a->addr[1].addr32[1] < b->addr[1].addr32[1])
	    return (-1);
	if (a->addr[0].addr32[0] > b->addr[0].addr32[0])
	    return (1);
	if (a->addr[0].addr32[0] < b->addr[0].addr32[0])
	    return (-1);
	if (a->addr[1].addr32[0] > b->addr[1].addr32[0])
	    return (1);
	if (a->addr[1].addr32[0] < b->addr[1].addr32[0])
	    return (-1);
	break;
#endif /* INET6 */
    }

    if ((diff = a->port[0] - b->port[0]) != 0)
	return (diff);
    if ((diff = a->port[1] - b->port[1]) != 0)
	return (diff);

    return (0);
}

static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
    if (a->id > b->id)
	return (1);
    if (a->id < b->id)
	return (-1);
    if (a->creatorid > b->creatorid)
	return (1);
    if (a->creatorid < b->creatorid)
	return (-1);

    return (0);
}
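/*
 * Attach a state key to a state at slot idx (PF_SK_WIRE or PF_SK_STACK),
 * inserting it into the per-cpu state table (or, for PFSTATE_STACK_GLOBAL
 * wire keys, into the global table under pf_global_statetbl_lock).  If an
 * identical key already exists, the state is chained onto it unless a
 * state with the same kif and direction is already present, which counts
 * as a collision and fails with -1.
 */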
int
pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
{
    struct pf_state_item *si;
    struct pf_state_key *cur;
    int cpu;
    int error;

    /*
     * PFSTATE_STACK_GLOBAL is set for translations when the translated
     * address/port is not localized to the same cpu that the untranslated
     * address/port is on.  The wire pf_state_key is managed on the global
     * statetbl tree for this case.
     */
    if ((s->state_flags & PFSTATE_STACK_GLOBAL) && idx == PF_SK_WIRE) {
	cpu = MAXCPU;
	lockmgr(&pf_global_statetbl_lock, LK_EXCLUSIVE);
    } else {
	cpu = mycpu->gd_cpuid;
    }

    KKASSERT(s->key[idx] == NULL);	/* XXX handle this? */

    if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl[cpu], sk)) != NULL) {
	/* key exists.  check for same kif, if none, add to key */
	TAILQ_FOREACH(si, &cur->states, entry) {
	    if (si->s->kif == s->kif &&
		si->s->direction == s->direction) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
		    kprintf(
			"pf: %s key attach failed on %s: ",
			(idx == PF_SK_WIRE) ?
			    "wire" : "stack",
			s->kif->pfik_name);
		    pf_print_state_parts(s,
			(idx == PF_SK_WIRE) ? sk : NULL,
			(idx == PF_SK_STACK) ? sk : NULL);
		    kprintf("\n");
		}
		kfree(sk, M_PFSTATEKEYPL);
		error = -1;
		goto failed;	/* collision! */
	    }
	}
	kfree(sk, M_PFSTATEKEYPL);

	s->key[idx] = cur;
    } else {
	s->key[idx] = sk;
    }

    if ((si = kmalloc(sizeof(struct pf_state_item),
	M_PFSTATEITEMPL, M_NOWAIT)) == NULL) {
	pf_state_key_detach(s, idx);
	error = -1;
	goto failed;	/* collision! */
    }
    si->s = s;

    /* list is sorted, if-bound states before floating */
    if (s->kif == pfi_all)
	TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
    else
	TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);

    error = 0;
failed:
    if ((s->state_flags & PFSTATE_STACK_GLOBAL) && idx == PF_SK_WIRE)
	lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
    return error;
}

/*
 * NOTE: Can only be called indirectly via the purge thread with pf_token
 *	 exclusively locked.
 */
void
pf_detach_state(struct pf_state *s)
{
    if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
	s->key[PF_SK_WIRE] = NULL;

    if (s->key[PF_SK_STACK] != NULL)
	pf_state_key_detach(s, PF_SK_STACK);

    if (s->key[PF_SK_WIRE] != NULL)
	pf_state_key_detach(s, PF_SK_WIRE);
}

/*
 * NOTE: Can only be called indirectly via the purge thread with pf_token
 *	 exclusively locked.
 */
void
pf_state_key_detach(struct pf_state *s, int idx)
{
    struct pf_state_item *si;
    int cpu;

    /*
     * PFSTATE_STACK_GLOBAL is set for translations when the translated
     * address/port is not localized to the same cpu that the untranslated
     * address/port is on.  The wire pf_state_key is managed on the global
     * statetbl tree for this case.
     */
    if ((s->state_flags & PFSTATE_STACK_GLOBAL) && idx == PF_SK_WIRE) {
	cpu = MAXCPU;
	lockmgr(&pf_global_statetbl_lock, LK_EXCLUSIVE);
    } else {
	cpu = mycpu->gd_cpuid;
    }

    si = TAILQ_FIRST(&s->key[idx]->states);
    while (si && si->s != s)
	si = TAILQ_NEXT(si, entry);

    if (si) {
	TAILQ_REMOVE(&s->key[idx]->states, si, entry);
	kfree(si, M_PFSTATEITEMPL);
    }

    if (TAILQ_EMPTY(&s->key[idx]->states)) {
	RB_REMOVE(pf_state_tree, &pf_statetbl[cpu], s->key[idx]);
	if (s->key[idx]->reverse)
	    s->key[idx]->reverse->reverse = NULL;
	if (s->key[idx]->inp)
	    s->key[idx]->inp->inp_pf_sk = NULL;
	kfree(s->key[idx], M_PFSTATEKEYPL);
    }
    s->key[idx] = NULL;

    if ((s->state_flags & PFSTATE_STACK_GLOBAL) && idx == PF_SK_WIRE)
	lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
}

struct pf_state_key *
pf_alloc_state_key(int pool_flags)
{
    struct pf_state_key *sk;

    sk = kmalloc(sizeof(struct pf_state_key), M_PFSTATEKEYPL, pool_flags);
    if (sk) {
	TAILQ_INIT(&sk->states);
    }
    return (sk);
}

int
pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr,
    struct pf_state_key **skw, struct pf_state_key **sks,
    struct pf_state_key **skp, struct pf_state_key **nkp,
    struct pf_addr *saddr, struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport)
{
    KKASSERT((*skp == NULL && *nkp == NULL));

    if ((*skp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
	return (ENOMEM);

    PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af);
    PF_ACPY(&(*skp)->addr[pd->didx], daddr, pd->af);
    (*skp)->port[pd->sidx] = sport;
    (*skp)->port[pd->didx] = dport;
    (*skp)->proto = pd->proto;
    (*skp)->af = pd->af;

    if (nr != NULL) {
	if ((*nkp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL)
	    return (ENOMEM);	/* caller must handle cleanup */

	/* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */
	PF_ACPY(&(*nkp)->addr[0], &(*skp)->addr[0], pd->af);
	PF_ACPY(&(*nkp)->addr[1], &(*skp)->addr[1], pd->af);
	(*nkp)->port[0] = (*skp)->port[0];
	(*nkp)->port[1] = (*skp)->port[1];
	(*nkp)->proto = pd->proto;
	(*nkp)->af = pd->af;
    } else {
	*nkp = *skp;
    }

    if (pd->dir == PF_IN) {
	*skw = *skp;
	*sks = *nkp;
    } else {
	*sks = *skp;
	*skw = *nkp;
    }
    return (0);
}
/*
 * Insert pf_state with one or two state keys (allowing a reverse path lookup
 * which is used by NAT).  In the NAT case skw is the initiator (?) and
 * sks is the target.
 */
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
    int cpu = mycpu->gd_cpuid;

    s->kif = kif;
    s->cpuid = cpu;

    if (skw == sks) {
	if (pf_state_key_attach(skw, s, PF_SK_WIRE))
	    return (-1);
	s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
    } else {
	if (pf_state_key_attach(skw, s, PF_SK_WIRE)) {
	    kfree(sks, M_PFSTATEKEYPL);
	    return (-1);
	}
	if (pf_state_key_attach(sks, s, PF_SK_STACK)) {
	    pf_state_key_detach(s, PF_SK_WIRE);
	    return (-1);
	}
    }

    if (s->id == 0 && s->creatorid == 0) {
	u_int64_t sid;

#if __SIZEOF_LONG__ == 8
	sid = atomic_fetchadd_long(&pf_status.stateid, 1);
#else
	spin_lock(&pf_spin);
	sid = pf_status.stateid++;
	spin_unlock(&pf_spin);
#endif
	s->id = htobe64(sid);
	s->creatorid = pf_status.hostid;
    }

    /*
     * Calculate hash code for altq
     */
    s->hash = crc32(s->key[PF_SK_WIRE], sizeof(*sks));

    if (RB_INSERT(pf_state_tree_id, &tree_id[cpu], s) != NULL) {
	if (pf_status.debug >= PF_DEBUG_MISC) {
	    kprintf("pf: state insert failed: "
		"id: %016jx creatorid: %08x",
		(uintmax_t)be64toh(s->id), ntohl(s->creatorid));
	    if (s->sync_flags & PFSTATE_FROMSYNC)
		kprintf(" (from sync)");
	    kprintf("\n");
	}
	pf_detach_state(s);
	return (-1);
    }
    TAILQ_INSERT_TAIL(&state_list[cpu], s, entry_list);
    pf_status.fcounters[FCNT_STATE_INSERT]++;
    atomic_add_int(&pf_status.states, 1);
    pfi_kif_ref(kif, PFI_KIF_REF_STATE);
    pfsync_insert_state(s);
    return (0);
}

struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
    int cpu = mycpu->gd_cpuid;

    pf_status.fcounters[FCNT_STATE_SEARCH]++;

    return (RB_FIND(pf_state_tree_id, &tree_id[cpu],
	(struct pf_state *)key));
}
/*
 * WARNING!  May return a state structure that was localized to another cpu,
 *	     destruction is typically protected by the caller's pf_token.
 *	     The element can only be destroyed by the purge thread, which
 *	     the caller's (shared) pf_token holds off.
 */
struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
    struct mbuf *m)
{
    struct pf_state_key *skey = (void *)key;
    struct pf_state_key *sk;
    struct pf_state_item *si;
    struct pf_state *s;
    int cpu = mycpu->gd_cpuid;
    int globalstl = 0;

    pf_status.fcounters[FCNT_STATE_SEARCH]++;

    if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
	((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse) {
	sk = ((struct pf_state_key *)m->m_pkthdr.pf.statekey)->reverse;
    } else {
	sk = RB_FIND(pf_state_tree, &pf_statetbl[cpu], skey);
	if (sk == NULL) {
	    lockmgr(&pf_global_statetbl_lock, LK_SHARED);
	    sk = RB_FIND(pf_state_tree, &pf_statetbl[MAXCPU], skey);
	    if (sk == NULL) {
		lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
		return (NULL);
	    }
	    globalstl = 1;
	}
	if (dir == PF_OUT && m->m_pkthdr.pf.statekey) {
	    ((struct pf_state_key *)
		m->m_pkthdr.pf.statekey)->reverse = sk;
	    sk->reverse = m->m_pkthdr.pf.statekey;
	}
    }
    if (dir == PF_OUT)
	m->m_pkthdr.pf.statekey = NULL;

    /* list is sorted, if-bound states before floating ones */
    TAILQ_FOREACH(si, &sk->states, entry) {
	if ((si->s->kif == pfi_all || si->s->kif == kif) &&
	    sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
		si->s->key[PF_SK_STACK])) {
	    break;
	}
    }

    /*
     * Extract state before potentially releasing the global statetbl
     * lock.  Ignore the state if the create is still in-progress as
     * it can be deleted out from under us by the owning localized cpu.
     * However, if CREATEINPROG is not set, state can only be deleted
     * by the purge thread which we are protected from via our shared
     * pf_token.
     */
    if (si) {
	s = si->s;
	if (s && (s->state_flags & PFSTATE_CREATEINPROG))
	    s = NULL;
    } else {
	s = NULL;
    }
    if (globalstl)
	lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
    return s;
}
/*
 * WARNING!  May return a state structure that was localized to another cpu,
 *	     destruction is typically protected by the caller's pf_token.
 */
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
    struct pf_state_key *skey = (void *)key;
    struct pf_state_key *sk;
    struct pf_state_item *si, *ret = NULL;
    struct pf_state *s;
    int cpu = mycpu->gd_cpuid;
    int globalstl = 0;

    pf_status.fcounters[FCNT_STATE_SEARCH]++;

    sk = RB_FIND(pf_state_tree, &pf_statetbl[cpu], skey);
    if (sk == NULL) {
	lockmgr(&pf_global_statetbl_lock, LK_SHARED);
	sk = RB_FIND(pf_state_tree, &pf_statetbl[MAXCPU], skey);
	globalstl = 1;
    }
    if (sk != NULL) {
	TAILQ_FOREACH(si, &sk->states, entry) {
	    if (dir == PF_INOUT ||
		(sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
		    si->s->key[PF_SK_STACK]))) {
		if (more == NULL) {
		    ret = si;
		    break;
		}
		if (ret)
		    (*more)++;
		else
		    ret = si;
	    }
	}
    }

    /*
     * Extract state before potentially releasing the global statetbl
     * lock.  Ignore the state if the create is still in-progress as
     * it can be deleted out from under us by the owning localized cpu.
     * However, if CREATEINPROG is not set, state can only be deleted
     * by the purge thread which we are protected from via our shared
     * pf_token.
     */
    if (ret) {
	s = ret->s;
	if (s && (s->state_flags & PFSTATE_CREATEINPROG))
	    s = NULL;
    } else {
	s = NULL;
    }
    if (globalstl)
	lockmgr(&pf_global_statetbl_lock, LK_RELEASE);
    return s;
}

/* END state table stuff */

void
pf_purge_thread(void *v)
{
    globaldata_t save_gd = mycpu;
    int nloops = 0;
    int locked = 0;
    int nn;
    int endingit;

    for (;;) {
	tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);

	endingit = pf_end_threads;

	for (nn = 0; nn < ncpus; ++nn) {
	    lwkt_setcpu_self(globaldata_find(nn));

	    lwkt_gettoken(&pf_token);
	    lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
	    crit_enter();

	    /*
	     * process a fraction of the state table every second
	     */
	    if (!pf_purge_expired_states(
		1 + (pf_status.states /
		    pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
		pf_purge_expired_states(
		    1 + (pf_status.states /
			pf_default_rule.timeout[PFTM_INTERVAL]), 1);
	    }

	    /*
	     * purge other expired types every PFTM_INTERVAL
	     * seconds
	     */
	    if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
		pf_purge_expired_fragments();
		if (!pf_purge_expired_src_nodes(locked)) {
		    pf_purge_expired_src_nodes(1);
		}
		nloops = 0;
	    }

	    /*
	     * If terminating the thread, clean everything out
	     * (on all cpus).
	     */
	    if (endingit) {
		pf_purge_expired_states(pf_status.states, 0);
		pf_purge_expired_fragments();
		pf_purge_expired_src_nodes(1);
	    }

	    crit_exit();
	    lockmgr(&pf_consistency_lock, LK_RELEASE);
	    lwkt_reltoken(&pf_token);
	}
	lwkt_setcpu_self(save_gd);
	if (endingit)
	    break;
    }

    /*
     * Thread termination
     */
    pf_end_threads++;
    wakeup(pf_purge_thread);
    kthread_exit();
}
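/*
 * Compute the absolute expiry time of a state.  With adaptive timeouts
 * (PFTM_ADAPTIVE_START/PFTM_ADAPTIVE_END), the configured timeout is
 * scaled down linearly as the state table fills: at 'start' states the
 * full timeout applies, approaching zero as the count nears 'end'.  E.g.
 * timeout 60, start 6000, end 12000 and 9000 states yield an effective
 * timeout of 60 * (12000 - 9000) / (12000 - 6000) = 30 seconds.
 */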
u_int32_t
pf_state_expires(const struct pf_state *state)
{
    u_int32_t timeout;
    u_int32_t start;
    u_int32_t end;
    u_int32_t states;

    /* handle all PFTM_* > PFTM_MAX here */
    if (state->timeout == PFTM_PURGE)
	return (time_second);
    if (state->timeout == PFTM_UNTIL_PACKET)
	return (0);
    KKASSERT(state->timeout != PFTM_UNLINKED);
    KKASSERT(state->timeout < PFTM_MAX);
    timeout = state->rule.ptr->timeout[state->timeout];
    if (!timeout)
	timeout = pf_default_rule.timeout[state->timeout];
    start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
    if (start) {
	end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
	states = state->rule.ptr->states_cur;
    } else {
	start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
	end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
	states = pf_status.states;
    }
    if (end && states > start && start < end) {
	if (states < end)
	    return (state->expire + timeout * (end - states) /
		(end - start));
	else
	    return (time_second);
    }
    return (state->expire + timeout);
}

/*
 * (called with exclusive pf_token)
 */
int
pf_purge_expired_src_nodes(int waslocked)
{
    struct pf_src_node *cur, *next;
    int locked = waslocked;
    int cpu = mycpu->gd_cpuid;

    for (cur = RB_MIN(pf_src_tree, &tree_src_tracking[cpu]);
	 cur;
	 cur = next) {
	next = RB_NEXT(pf_src_tree, &tree_src_tracking[cpu], cur);

	if (cur->states <= 0 && cur->expire <= time_second) {
	    if (!locked) {
		lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
		next = RB_NEXT(pf_src_tree,
		    &tree_src_tracking[cpu], cur);
		locked = 1;
	    }
	    if (cur->rule.ptr != NULL) {
		cur->rule.ptr->src_nodes--;
		if (cur->rule.ptr->states_cur <= 0 &&
		    cur->rule.ptr->max_src_nodes <= 0)
		    pf_rm_rule(NULL, cur->rule.ptr);
	    }
	    RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], cur);
	    pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
	    atomic_add_int(&pf_status.src_nodes, -1);
	    kfree(cur, M_PFSRCTREEPL);
	}
    }
    if (locked && !waslocked)
	lockmgr(&pf_consistency_lock, LK_RELEASE);
    return(1);
}

void
pf_src_tree_remove_state(struct pf_state *s)
{
    u_int32_t timeout;

    if (s->src_node != NULL) {
	if (s->src.tcp_est)
	    --s->src_node->conn;
	if (--s->src_node->states <= 0) {
	    timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
	    if (!timeout) {
		timeout =
		    pf_default_rule.timeout[PFTM_SRC_NODE];
	    }
	    s->src_node->expire = time_second + timeout;
	}
    }
    if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
	if (--s->nat_src_node->states <= 0) {
	    timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
	    if (!timeout)
		timeout =
		    pf_default_rule.timeout[PFTM_SRC_NODE];
	    s->nat_src_node->expire = time_second + timeout;
	}
    }
    s->src_node = s->nat_src_node = NULL;
}

/* callers should be at crit_enter() */
void
pf_unlink_state(struct pf_state *cur)
{
    int cpu = mycpu->gd_cpuid;

    if (cur->src.state == PF_TCPS_PROXY_DST) {
	/* XXX wire key the right one? */
	pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
	    &cur->key[PF_SK_WIRE]->addr[1],
	    &cur->key[PF_SK_WIRE]->addr[0],
	    cur->key[PF_SK_WIRE]->port[1],
	    cur->key[PF_SK_WIRE]->port[0],
	    cur->src.seqhi, cur->src.seqlo + 1,
	    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
    }
    RB_REMOVE(pf_state_tree_id, &tree_id[cpu], cur);
    if (cur->creatorid == pf_status.hostid)
	pfsync_delete_state(cur);
    cur->timeout = PFTM_UNLINKED;
    pf_src_tree_remove_state(cur);
    pf_detach_state(cur);
}
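/*
 * Per-cpu saved scan position for the incremental state purge; see
 * pf_purge_expired_states() and the conflict fixup in pf_free_state().
 */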
static struct pf_state *purge_cur[MAXCPU];

/*
 * callers should be at crit_enter() and hold pf_consistency_lock exclusively.
 * pf_token must also be held exclusively.
 */
void
pf_free_state(struct pf_state *cur)
{
    int cpu = mycpu->gd_cpuid;

    KKASSERT(cur->cpuid == cpu);

    if (pfsyncif != NULL &&
	(pfsyncif->sc_bulk_send_next == cur ||
	 pfsyncif->sc_bulk_terminator == cur))
	return;
    KKASSERT(cur->timeout == PFTM_UNLINKED);
    if (--cur->rule.ptr->states_cur <= 0 &&
	cur->rule.ptr->src_nodes <= 0)
	pf_rm_rule(NULL, cur->rule.ptr);
    if (cur->nat_rule.ptr != NULL) {
	if (--cur->nat_rule.ptr->states_cur <= 0 &&
	    cur->nat_rule.ptr->src_nodes <= 0) {
	    pf_rm_rule(NULL, cur->nat_rule.ptr);
	}
    }
    if (cur->anchor.ptr != NULL) {
	if (--cur->anchor.ptr->states_cur <= 0)
	    pf_rm_rule(NULL, cur->anchor.ptr);
    }
    pf_normalize_tcp_cleanup(cur);
    pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);

    /*
     * We may be freeing pf_purge_expired_states()'s saved scan entry,
     * adjust it if necessary.
     */
    if (purge_cur[cpu] == cur) {
	kprintf("PURGE CONFLICT\n");
	purge_cur[cpu] = TAILQ_NEXT(purge_cur[cpu], entry_list);
    }
    TAILQ_REMOVE(&state_list[cpu], cur, entry_list);
    if (cur->tag)
	pf_tag_unref(cur->tag);
    kfree(cur, M_PFSTATEPL);
    pf_status.fcounters[FCNT_STATE_REMOVALS]++;
    atomic_add_int(&pf_status.states, -1);
}

int
pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
{
    struct pf_state *cur;
    int locked = waslocked;
    int cpu = mycpu->gd_cpuid;

    while (maxcheck--) {
	/*
	 * Wrap to start of list when we hit the end
	 */
	cur = purge_cur[cpu];
	if (cur == NULL) {
	    cur = TAILQ_FIRST(&state_list[cpu]);
	    if (cur == NULL)
		break;	/* list empty */
	}

	/*
	 * Setup next (purge_cur) while we process this one.  If
	 * we block and something else deletes purge_cur,
	 * pf_free_state() will adjust it further ahead.
	 */
	purge_cur[cpu] = TAILQ_NEXT(cur, entry_list);

	if (cur->timeout == PFTM_UNLINKED) {
	    /* free unlinked state */
	    if (!locked) {
		lockmgr(&pf_consistency_lock, LK_EXCLUSIVE);
		locked = 1;
	    }
	    pf_free_state(cur);
	} else if (pf_state_expires(cur) <= time_second) {
	    /* unlink and free expired state */
	    pf_unlink_state(cur);
	    if (!locked) {
		if (!lockmgr(&pf_consistency_lock, LK_EXCLUSIVE))
		    return (0);
		locked = 1;
	    }
	    pf_free_state(cur);
	}
    }

    if (locked)
	lockmgr(&pf_consistency_lock, LK_RELEASE);
    return (1);
}

int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
    if (aw->type != PF_ADDR_TABLE)
	return (0);
    if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
	return (1);
    return (0);
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
    if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
	return;
    pfr_detach_table(aw->p.tbl);
    aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
    struct pfr_ktable *kt = aw->p.tbl;

    if (aw->type != PF_ADDR_TABLE || kt == NULL)
	return;
    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
	kt = kt->pfrkt_root;
    aw->p.tbl = NULL;
    aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	kt->pfrkt_cnt : -1;
}
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
    switch (af) {
#ifdef INET
    case AF_INET: {
	u_int32_t a = ntohl(addr->addr32[0]);

	kprintf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
	    (a>>8)&255, a&255);
	if (p) {
	    p = ntohs(p);
	    kprintf(":%u", p);
	}
	break;
    }
#endif /* INET */
#ifdef INET6
    case AF_INET6: {
	u_int16_t b;
	u_int8_t i, curstart = 255, curend = 0,
	    maxstart = 0, maxend = 0;

	for (i = 0; i < 8; i++) {
	    if (!addr->addr16[i]) {
		if (curstart == 255)
		    curstart = i;
		else
		    curend = i;
	    } else {
		if (curstart) {
		    if ((curend - curstart) >
			(maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
			curstart = 255;
		    }
		}
	    }
	}
	for (i = 0; i < 8; i++) {
	    if (i >= maxstart && i <= maxend) {
		if (maxend != 7) {
		    if (i == maxstart)
			kprintf(":");
		} else {
		    if (i == maxend)
			kprintf(":");
		}
	    } else {
		b = ntohs(addr->addr16[i]);
		kprintf("%x", b);
		if (i < 7)
		    kprintf(":");
	    }
	}
	if (p) {
	    p = ntohs(p);
	    kprintf("[%u]", p);
	}
	break;
    }
#endif /* INET6 */
    }
}

void
pf_print_state(struct pf_state *s)
{
    pf_print_state_parts(s, NULL, NULL);
}

void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
    struct pf_state_key *skw, *sks;
    u_int8_t proto, dir;

    /* Do our best to fill these, but they're skipped if NULL */
    skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
    sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
    proto = skw ? skw->proto : (sks ? sks->proto : 0);
    dir = s ? s->direction : 0;

    switch (proto) {
    case IPPROTO_TCP:
	kprintf("TCP ");
	break;
    case IPPROTO_UDP:
	kprintf("UDP ");
	break;
    case IPPROTO_ICMP:
	kprintf("ICMP ");
	break;
    case IPPROTO_ICMPV6:
	kprintf("ICMPV6 ");
	break;
    default:
	kprintf("%u ", skw->proto);
	break;
    }
    switch (dir) {
    case PF_IN:
	kprintf(" in");
	break;
    case PF_OUT:
	kprintf(" out");
	break;
    }
    if (skw) {
	kprintf(" wire: ");
	pf_print_host(&skw->addr[0], skw->port[0], skw->af);
	kprintf(" ");
	pf_print_host(&skw->addr[1], skw->port[1], skw->af);
    }
    if (sks) {
	kprintf(" stack: ");
	if (sks != skw) {
	    pf_print_host(&sks->addr[0], sks->port[0], sks->af);
	    kprintf(" ");
	    pf_print_host(&sks->addr[1], sks->port[1], sks->af);
	} else {
	    kprintf("-");
	}
    }
    if (s) {
	if (proto == IPPROTO_TCP) {
	    kprintf(" [lo=%u high=%u win=%u modulator=%u",
		s->src.seqlo, s->src.seqhi,
		s->src.max_win, s->src.seqdiff);
	    if (s->src.wscale && s->dst.wscale)
		kprintf(" wscale=%u",
		    s->src.wscale & PF_WSCALE_MASK);
	    kprintf("]");
	    kprintf(" [lo=%u high=%u win=%u modulator=%u",
		s->dst.seqlo, s->dst.seqhi,
		s->dst.max_win, s->dst.seqdiff);
	    if (s->src.wscale && s->dst.wscale)
		kprintf(" wscale=%u",
		    s->dst.wscale & PF_WSCALE_MASK);
	    kprintf("]");
	}
	kprintf(" %u:%u", s->src.state, s->dst.state);
    }
}
void
pf_print_flags(u_int8_t f)
{
    if (f)
	kprintf(" ");
    if (f & TH_FIN)
	kprintf("F");
    if (f & TH_SYN)
	kprintf("S");
    if (f & TH_RST)
	kprintf("R");
    if (f & TH_PUSH)
	kprintf("P");
    if (f & TH_ACK)
	kprintf("A");
    if (f & TH_URG)
	kprintf("U");
    if (f & TH_ECE)
	kprintf("E");
    if (f & TH_CWR)
	kprintf("W");
}

#define PF_SET_SKIP_STEPS(i)					\
    do {							\
	while (head[i] != cur) {				\
	    head[i]->skip[i].ptr = cur;				\
	    head[i] = TAILQ_NEXT(head[i], entries);		\
	}							\
    } while (0)
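/*
 * Precompute the skip-step pointers for a rule list: for each PF_SKIP_*
 * criterion, every rule is pointed at the next rule that differs in that
 * criterion, so the rule evaluator can skip over whole runs of rules that
 * cannot match the current packet.
 */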
void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
    struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
    int i;

    cur = TAILQ_FIRST(rules);
    prev = cur;
    for (i = 0; i < PF_SKIP_COUNT; ++i)
	head[i] = cur;
    while (cur != NULL) {
	if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
	    PF_SET_SKIP_STEPS(PF_SKIP_IFP);
	if (cur->direction != prev->direction)
	    PF_SET_SKIP_STEPS(PF_SKIP_DIR);
	if (cur->af != prev->af)
	    PF_SET_SKIP_STEPS(PF_SKIP_AF);
	if (cur->proto != prev->proto)
	    PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
	if (cur->src.neg != prev->src.neg ||
	    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
	    PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
	if (cur->src.port[0] != prev->src.port[0] ||
	    cur->src.port[1] != prev->src.port[1] ||
	    cur->src.port_op != prev->src.port_op)
	    PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
	if (cur->dst.neg != prev->dst.neg ||
	    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
	    PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
	if (cur->dst.port[0] != prev->dst.port[0] ||
	    cur->dst.port[1] != prev->dst.port[1] ||
	    cur->dst.port_op != prev->dst.port_op)
	    PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

	prev = cur;
	cur = TAILQ_NEXT(cur, entries);
    }
    for (i = 0; i < PF_SKIP_COUNT; ++i)
	PF_SET_SKIP_STEPS(i);
}

int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
    if (aw1->type != aw2->type)
	return (1);
    switch (aw1->type) {
    case PF_ADDR_ADDRMASK:
    case PF_ADDR_RANGE:
	if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
	    return (1);
	if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
	    return (1);
	return (0);
    case PF_ADDR_DYNIFTL:
	return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
    case PF_ADDR_NOROUTE:
    case PF_ADDR_URPFFAILED:
	return (0);
    case PF_ADDR_TABLE:
	return (aw1->p.tbl != aw2->p.tbl);
    case PF_ADDR_RTLABEL:
	return (aw1->v.rtlabel != aw2->v.rtlabel);
    default:
	kprintf("invalid address type: %d\n", aw1->type);
	return (1);
    }
}
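/*
 * Incrementally update a 16-bit internet checksum (RFC 1624 style) when
 * a 16-bit word changes from 'old' to 'new'.  For UDP (u != 0) a zero
 * checksum means "no checksum" and is preserved, and a computed result
 * of zero is encoded as 0xFFFF.
 */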
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
    u_int32_t l;

    if (udp && !cksum)
	return (0x0000);
    l = cksum + old - new;
    l = (l >> 16) + (l & 65535);
    l = l & 65535;
    if (udp && !l)
	return (0xFFFF);
    return (l);
}

void
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
{
    struct pf_addr ao;
    u_int16_t po = *p;

    PF_ACPY(&ao, a, af);
    PF_ACPY(a, an, af);

    *p = pn;

    switch (af) {
#ifdef INET
    case AF_INET:
	*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
	    ao.addr16[0], an->addr16[0], 0),
	    ao.addr16[1], an->addr16[1], 0);
	*p = pn;
	*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    po, pn, u);
	break;
#endif /* INET */
#ifdef INET6
    case AF_INET6:
	*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u),
	    po, pn, u);
	break;
#endif /* INET6 */
    }
}

/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
    u_int32_t ao;

    memcpy(&ao, a, sizeof(ao));
    memcpy(a, &an, sizeof(u_int32_t));
    *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	ao % 65536, an % 65536, u);
}

#ifdef INET6
void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
    struct pf_addr ao;

    PF_ACPY(&ao, a, AF_INET6);
    PF_ACPY(a, an, AF_INET6);

    *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	pf_cksum_fixup(pf_cksum_fixup(*c,
	ao.addr16[0], an->addr16[0], u),
	ao.addr16[1], an->addr16[1], u),
	ao.addr16[2], an->addr16[2], u),
	ao.addr16[3], an->addr16[3], u),
	ao.addr16[4], an->addr16[4], u),
	ao.addr16[5], an->addr16[5], u),
	ao.addr16[6], an->addr16[6], u),
	ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
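/*
 * Rewrite the address/port embedded in an ICMP error's inner packet and
 * fix up every affected checksum: the inner protocol checksum, the inner
 * IP header checksum, the ICMP checksum, and the outer header checksum
 * as needed.
 */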
void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
    struct pf_addr oia, ooa;

    PF_ACPY(&oia, ia, af);
    if (oa)
	PF_ACPY(&ooa, oa, af);

    /* Change inner protocol port, fix inner protocol checksum. */
    if (ip != NULL) {
	u_int16_t oip = *ip;
	u_int32_t opc = 0;

	if (pc != NULL)
	    opc = *pc;
	*ip = np;
	if (pc != NULL)
	    *pc = pf_cksum_fixup(*pc, oip, *ip, u);
	*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
	if (pc != NULL)
	    *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
    }
    /* Change inner ip address, fix inner ip and icmp checksums. */
    PF_ACPY(ia, na, af);
    switch (af) {
#ifdef INET
    case AF_INET: {
	u_int32_t oh2c = *h2c;

	*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
	    oia.addr16[0], ia->addr16[0], 0),
	    oia.addr16[1], ia->addr16[1], 0);
	*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
	    oia.addr16[0], ia->addr16[0], 0),
	    oia.addr16[1], ia->addr16[1], 0);
	*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
	break;
    }
#endif /* INET */
#ifdef INET6
    case AF_INET6:
	*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*ic,
	    oia.addr16[0], ia->addr16[0], u),
	    oia.addr16[1], ia->addr16[1], u),
	    oia.addr16[2], ia->addr16[2], u),
	    oia.addr16[3], ia->addr16[3], u),
	    oia.addr16[4], ia->addr16[4], u),
	    oia.addr16[5], ia->addr16[5], u),
	    oia.addr16[6], ia->addr16[6], u),
	    oia.addr16[7], ia->addr16[7], u);
	break;
#endif /* INET6 */
    }
    /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
    if (oa) {
	PF_ACPY(oa, na, af);
	switch (af) {
#ifdef INET
	case AF_INET:
	    *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
		ooa.addr16[0], oa->addr16[0], 0),
		ooa.addr16[1], oa->addr16[1], 0);
	    break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		pf_cksum_fixup(pf_cksum_fixup(*ic,
		ooa.addr16[0], oa->addr16[0], u),
		ooa.addr16[1], oa->addr16[1], u),
		ooa.addr16[2], oa->addr16[2], u),
		ooa.addr16[3], oa->addr16[3], u),
		ooa.addr16[4], oa->addr16[4], u),
		ooa.addr16[5], oa->addr16[5], u),
		ooa.addr16[6], oa->addr16[6], u),
		ooa.addr16[7], oa->addr16[7], u);
	    break;
#endif /* INET6 */
	}
    }
}

/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
    int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
    u_int8_t opts[TCP_MAXOLEN], *opt = opts;
    int copyback = 0, i, olen;
    struct raw_sackblock sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
    if (hlen < TCPOLEN_SACKLEN ||
	!pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
	return 0;

    while (hlen >= TCPOLEN_SACKLEN) {
	olen = opt[1];
	switch (*opt) {
	case TCPOPT_EOL:	/* FALLTHROUGH */
	case TCPOPT_NOP:
	    opt++;
	    hlen--;
	    break;
	case TCPOPT_SACK:
	    if (olen > hlen)
		olen = hlen;
	    if (olen >= TCPOLEN_SACKLEN) {
		for (i = 2; i + TCPOLEN_SACK <= olen;
		     i += TCPOLEN_SACK) {
		    memcpy(&sack, &opt[i], sizeof(sack));
		    pf_change_a(&sack.rblk_start, &th->th_sum,
			htonl(ntohl(sack.rblk_start) -
			    dst->seqdiff), 0);
		    pf_change_a(&sack.rblk_end, &th->th_sum,
			htonl(ntohl(sack.rblk_end) -
			    dst->seqdiff), 0);
		    memcpy(&opt[i], &sack, sizeof(sack));
		}
		copyback = 1;
	    }
	    /* FALLTHROUGH */
	default:
	    if (olen < 2)
		olen = 2;
	    hlen -= olen;
	    opt += olen;
	}
    }

    if (copyback)
	m_copyback(m, off + sizeof(*th), thoptlen, opts);
    return (copyback);
}
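/*
 * Build and transmit a bare TCP segment (e.g. the RST sent for
 * "return-rst", or synproxy's handshake segments) with the given
 * addresses, ports, sequence numbers, flags and optional MSS.
 */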
1979 */ 1980 m = m_gethdr(MB_DONTWAIT, MT_HEADER); 1981 if (m == NULL) { 1982 return; 1983 } 1984 if (tag) 1985 m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED; 1986 m->m_pkthdr.pf.flags = 0; 1987 m->m_pkthdr.pf.tag = rtag; 1988 /* XXX Recheck when upgrading to > 4.4 */ 1989 m->m_pkthdr.pf.statekey = NULL; 1990 if (r != NULL && r->rtableid >= 0) 1991 m->m_pkthdr.pf.rtableid = r->rtableid; 1992 1993 #ifdef ALTQ 1994 if (r != NULL && r->qid) { 1995 m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE; 1996 m->m_pkthdr.pf.qid = r->qid; 1997 m->m_pkthdr.pf.ecn_af = af; 1998 m->m_pkthdr.pf.hdr = mtod(m, struct ip *); 1999 } 2000 #endif /* ALTQ */ 2001 m->m_data += max_linkhdr; 2002 m->m_pkthdr.len = m->m_len = len; 2003 m->m_pkthdr.rcvif = NULL; 2004 bzero(m->m_data, len); 2005 switch (af) { 2006 #ifdef INET 2007 case AF_INET: 2008 h = mtod(m, struct ip *); 2009 2010 /* IP header fields included in the TCP checksum */ 2011 h->ip_p = IPPROTO_TCP; 2012 h->ip_len = tlen; 2013 h->ip_src.s_addr = saddr->v4.s_addr; 2014 h->ip_dst.s_addr = daddr->v4.s_addr; 2015 2016 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip)); 2017 break; 2018 #endif /* INET */ 2019 #ifdef INET6 2020 case AF_INET6: 2021 h6 = mtod(m, struct ip6_hdr *); 2022 2023 /* IP header fields included in the TCP checksum */ 2024 h6->ip6_nxt = IPPROTO_TCP; 2025 h6->ip6_plen = htons(tlen); 2026 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr)); 2027 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr)); 2028 2029 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr)); 2030 break; 2031 #endif /* INET6 */ 2032 } 2033 2034 /* TCP header */ 2035 th->th_sport = sport; 2036 th->th_dport = dport; 2037 th->th_seq = htonl(seq); 2038 th->th_ack = htonl(ack); 2039 th->th_off = tlen >> 2; 2040 th->th_flags = flags; 2041 th->th_win = htons(win); 2042 2043 if (mss) { 2044 opt = (char *)(th + 1); 2045 opt[0] = TCPOPT_MAXSEG; 2046 opt[1] = 4; 2047 mss = htons(mss); 2048 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2); 2049 } 2050 2051 switch (af) { 2052 #ifdef INET 2053 case AF_INET: 2054 /* TCP checksum */ 2055 th->th_sum = in_cksum(m, len); 2056 2057 /* Finish the IP header */ 2058 h->ip_v = 4; 2059 h->ip_hl = sizeof(*h) >> 2; 2060 h->ip_tos = IPTOS_LOWDELAY; 2061 h->ip_len = len; 2062 h->ip_off = path_mtu_discovery ? IP_DF : 0; 2063 h->ip_ttl = ttl ? 
ttl : ip_defttl;
2064		h->ip_sum = 0;
2065		if (eh == NULL) {
2066			lwkt_reltoken(&pf_token);
2067			ip_output(m, NULL, NULL, 0, NULL, NULL);
2068			lwkt_gettoken(&pf_token);
2069		} else {
2070			struct route ro;
2071			struct rtentry rt;
2072			struct ether_header *e = (void *)ro.ro_dst.sa_data;
2073
2074			if (ifp == NULL) {
2075				m_freem(m);
2076				return;
2077			}
2078			rt.rt_ifp = ifp;
2079			ro.ro_rt = &rt;
2080			ro.ro_dst.sa_len = sizeof(ro.ro_dst);
2081			ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
2082			bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
2083			bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
2084			e->ether_type = eh->ether_type;
2085			/* XXX_IMPORT: later */
2086			lwkt_reltoken(&pf_token);
2087			ip_output(m, NULL, &ro, 0, NULL, NULL);
2088			lwkt_gettoken(&pf_token);
2089		}
2090		break;
2091	#endif /* INET */
2092	#ifdef INET6
2093	case AF_INET6:
2094		/* TCP checksum */
2095		th->th_sum = in6_cksum(m, IPPROTO_TCP,
2096		    sizeof(struct ip6_hdr), tlen);
2097
2098		h6->ip6_vfc |= IPV6_VERSION;
2099		h6->ip6_hlim = IPV6_DEFHLIM;
2100
2101		lwkt_reltoken(&pf_token);
2102		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2103		lwkt_gettoken(&pf_token);
2104		break;
2105	#endif /* INET6 */
2106		}
2107	}
2108
2109	void
2110	pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2111	    struct pf_rule *r)
2112	{
2113		struct mbuf *m0;
2114
2115		/*
2116		 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
2117		 * so make sure pf.flags is clear.
2118		 */
2119		if ((m0 = m_copy(m, 0, M_COPYALL)) == NULL)
2120			return;
2121
2122		m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
2123		m0->m_pkthdr.pf.flags = 0;
2124		/* XXX Recheck when upgrading to > 4.4 */
2125		m0->m_pkthdr.pf.statekey = NULL;
2126
2127		if (r->rtableid >= 0)
2128			m0->m_pkthdr.pf.rtableid = r->rtableid;
2129
2130	#ifdef ALTQ
2131		if (r->qid) {
2132			m0->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
2133			m0->m_pkthdr.pf.qid = r->qid;
2134			m0->m_pkthdr.pf.ecn_af = af;
2135			m0->m_pkthdr.pf.hdr = mtod(m0, struct ip *);
2136		}
2137	#endif /* ALTQ */
2138
2139		switch (af) {
2140	#ifdef INET
2141		case AF_INET:
2142			icmp_error(m0, type, code, 0, 0);
2143			break;
2144	#endif /* INET */
2145	#ifdef INET6
2146		case AF_INET6:
2147			icmp6_error(m0, type, code, 0);
2148			break;
2149	#endif /* INET6 */
2150		}
2151	}
2152
2153	/*
2154	 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2155	 * If n is 0, they match if they are equal. If n is != 0, they match if they
2156	 * are different.
2157	 */
2158	int
2159	pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2160	    struct pf_addr *b, sa_family_t af)
2161	{
2162		int match = 0;
2163
2164		switch (af) {
2165	#ifdef INET
2166		case AF_INET:
2167			if ((a->addr32[0] & m->addr32[0]) ==
2168			    (b->addr32[0] & m->addr32[0]))
2169				match++;
2170			break;
2171	#endif /* INET */
2172	#ifdef INET6
2173		case AF_INET6:
2174			if (((a->addr32[0] & m->addr32[0]) ==
2175			    (b->addr32[0] & m->addr32[0])) &&
2176			    ((a->addr32[1] & m->addr32[1]) ==
2177			    (b->addr32[1] & m->addr32[1])) &&
2178			    ((a->addr32[2] & m->addr32[2]) ==
2179			    (b->addr32[2] & m->addr32[2])) &&
2180			    ((a->addr32[3] & m->addr32[3]) ==
2181			    (b->addr32[3] & m->addr32[3])))
2182				match++;
2183			break;
2184	#endif /* INET6 */
2185		}
2186		if (match) {
2187			if (n)
2188				return (0);
2189			else
2190				return (1);
2191		} else {
2192			if (n)
2193				return (1);
2194			else
2195				return (0);
2196		}
2197	}
2198
2199	/*
2200	 * Return 1 if b <= a <= e, otherwise return 0.
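 *
 * For AF_INET a single 32-bit compare decides.  For AF_INET6 the four
 * addr32[] words are scanned most significant first and the first
 * unequal word decides each bound; equality on all four words
 * satisfies both bounds.  E.g. (illustrative, treating each word as a
 * big-endian quantity): for b = 2001:db8::1000, e = 2001:db8::2000
 * and a = 2001:db8::1234, words 0-2 compare equal and word 3 settles
 * both a >= b (0x1234 > 0x1000) and a <= e (0x1234 < 0x2000).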
2201 */ 2202 int 2203 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, 2204 struct pf_addr *a, sa_family_t af) 2205 { 2206 switch (af) { 2207 #ifdef INET 2208 case AF_INET: 2209 if ((a->addr32[0] < b->addr32[0]) || 2210 (a->addr32[0] > e->addr32[0])) 2211 return (0); 2212 break; 2213 #endif /* INET */ 2214 #ifdef INET6 2215 case AF_INET6: { 2216 int i; 2217 2218 /* check a >= b */ 2219 for (i = 0; i < 4; ++i) 2220 if (a->addr32[i] > b->addr32[i]) 2221 break; 2222 else if (a->addr32[i] < b->addr32[i]) 2223 return (0); 2224 /* check a <= e */ 2225 for (i = 0; i < 4; ++i) 2226 if (a->addr32[i] < e->addr32[i]) 2227 break; 2228 else if (a->addr32[i] > e->addr32[i]) 2229 return (0); 2230 break; 2231 } 2232 #endif /* INET6 */ 2233 } 2234 return (1); 2235 } 2236 2237 int 2238 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) 2239 { 2240 switch (op) { 2241 case PF_OP_IRG: 2242 return ((p > a1) && (p < a2)); 2243 case PF_OP_XRG: 2244 return ((p < a1) || (p > a2)); 2245 case PF_OP_RRG: 2246 return ((p >= a1) && (p <= a2)); 2247 case PF_OP_EQ: 2248 return (p == a1); 2249 case PF_OP_NE: 2250 return (p != a1); 2251 case PF_OP_LT: 2252 return (p < a1); 2253 case PF_OP_LE: 2254 return (p <= a1); 2255 case PF_OP_GT: 2256 return (p > a1); 2257 case PF_OP_GE: 2258 return (p >= a1); 2259 } 2260 return (0); /* never reached */ 2261 } 2262 2263 int 2264 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) 2265 { 2266 a1 = ntohs(a1); 2267 a2 = ntohs(a2); 2268 p = ntohs(p); 2269 return (pf_match(op, a1, a2, p)); 2270 } 2271 2272 int 2273 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) 2274 { 2275 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2276 return (0); 2277 return (pf_match(op, a1, a2, u)); 2278 } 2279 2280 int 2281 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) 2282 { 2283 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2284 return (0); 2285 return (pf_match(op, a1, a2, g)); 2286 } 2287 2288 int 2289 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag) 2290 { 2291 if (*tag == -1) 2292 *tag = m->m_pkthdr.pf.tag; 2293 2294 return ((!r->match_tag_not && r->match_tag == *tag) || 2295 (r->match_tag_not && r->match_tag != *tag)); 2296 } 2297 2298 int 2299 pf_tag_packet(struct mbuf *m, int tag, int rtableid) 2300 { 2301 if (tag <= 0 && rtableid < 0) 2302 return (0); 2303 2304 if (tag > 0) 2305 m->m_pkthdr.pf.tag = tag; 2306 if (rtableid >= 0) 2307 m->m_pkthdr.pf.rtableid = rtableid; 2308 2309 return (0); 2310 } 2311 2312 void 2313 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n, 2314 struct pf_rule **r, struct pf_rule **a, int *match) 2315 { 2316 struct pf_anchor_stackframe *f; 2317 2318 (*r)->anchor->match = 0; 2319 if (match) 2320 *match = 0; 2321 if (*depth >= NELEM(pf_anchor_stack)) { 2322 kprintf("pf_step_into_anchor: stack overflow\n"); 2323 *r = TAILQ_NEXT(*r, entries); 2324 return; 2325 } else if (*depth == 0 && a != NULL) 2326 *a = *r; 2327 f = pf_anchor_stack + (*depth)++; 2328 f->rs = *rs; 2329 f->r = *r; 2330 if ((*r)->anchor_wildcard) { 2331 f->parent = &(*r)->anchor->children; 2332 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) == 2333 NULL) { 2334 *r = NULL; 2335 return; 2336 } 2337 *rs = &f->child->ruleset; 2338 } else { 2339 f->parent = NULL; 2340 f->child = NULL; 2341 *rs = &(*r)->anchor->ruleset; 2342 } 2343 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2344 } 2345 2346 int 2347 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n, 2348 struct pf_rule **r, struct pf_rule **a, int *match) 2349 { 
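	/*
	 * Counterpart of pf_step_into_anchor() above: pop frames off
	 * pf_anchor_stack until one yields a rule to continue with.
	 * A sketch of one traversal (illustrative):
	 *
	 *	depth 0: main ruleset, paused at rule "anchor foo"
	 *	depth 1: ruleset "foo", evaluated to its end
	 *
	 * Stepping out restores *rs from the saved frame and resumes at
	 * TAILQ_NEXT() of the anchor rule itself; wildcard anchors first
	 * walk their remaining children via RB_NEXT().  The anchor
	 * rule's 'quick' setting is propagated if anything inside the
	 * anchor matched.
	 */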
2350 struct pf_anchor_stackframe *f; 2351 int quick = 0; 2352 2353 do { 2354 if (*depth <= 0) 2355 break; 2356 f = pf_anchor_stack + *depth - 1; 2357 if (f->parent != NULL && f->child != NULL) { 2358 if (f->child->match || 2359 (match != NULL && *match)) { 2360 f->r->anchor->match = 1; 2361 *match = 0; 2362 } 2363 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child); 2364 if (f->child != NULL) { 2365 *rs = &f->child->ruleset; 2366 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2367 if (*r == NULL) 2368 continue; 2369 else 2370 break; 2371 } 2372 } 2373 (*depth)--; 2374 if (*depth == 0 && a != NULL) 2375 *a = NULL; 2376 *rs = f->rs; 2377 if (f->r->anchor->match || (match != NULL && *match)) 2378 quick = f->r->quick; 2379 *r = TAILQ_NEXT(f->r, entries); 2380 } while (*r == NULL); 2381 2382 return (quick); 2383 } 2384 2385 #ifdef INET6 2386 void 2387 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, 2388 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) 2389 { 2390 switch (af) { 2391 #ifdef INET 2392 case AF_INET: 2393 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2394 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2395 break; 2396 #endif /* INET */ 2397 case AF_INET6: 2398 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2399 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2400 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | 2401 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); 2402 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | 2403 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); 2404 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | 2405 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); 2406 break; 2407 } 2408 } 2409 2410 void 2411 pf_addr_inc(struct pf_addr *addr, sa_family_t af) 2412 { 2413 switch (af) { 2414 #ifdef INET 2415 case AF_INET: 2416 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); 2417 break; 2418 #endif /* INET */ 2419 case AF_INET6: 2420 if (addr->addr32[3] == 0xffffffff) { 2421 addr->addr32[3] = 0; 2422 if (addr->addr32[2] == 0xffffffff) { 2423 addr->addr32[2] = 0; 2424 if (addr->addr32[1] == 0xffffffff) { 2425 addr->addr32[1] = 0; 2426 addr->addr32[0] = 2427 htonl(ntohl(addr->addr32[0]) + 1); 2428 } else 2429 addr->addr32[1] = 2430 htonl(ntohl(addr->addr32[1]) + 1); 2431 } else 2432 addr->addr32[2] = 2433 htonl(ntohl(addr->addr32[2]) + 1); 2434 } else 2435 addr->addr32[3] = 2436 htonl(ntohl(addr->addr32[3]) + 1); 2437 break; 2438 } 2439 } 2440 #endif /* INET6 */ 2441 2442 #define mix(a,b,c) \ 2443 do { \ 2444 a -= b; a -= c; a ^= (c >> 13); \ 2445 b -= c; b -= a; b ^= (a << 8); \ 2446 c -= a; c -= b; c ^= (b >> 13); \ 2447 a -= b; a -= c; a ^= (c >> 12); \ 2448 b -= c; b -= a; b ^= (a << 16); \ 2449 c -= a; c -= b; c ^= (b >> 5); \ 2450 a -= b; a -= c; a ^= (c >> 3); \ 2451 b -= c; b -= a; b ^= (a << 10); \ 2452 c -= a; c -= b; c ^= (b >> 15); \ 2453 } while (0) 2454 2455 /* 2456 * hash function based on bridge_hash in if_bridge.c 2457 */ 2458 void 2459 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash, 2460 struct pf_poolhashkey *key, sa_family_t af) 2461 { 2462 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0]; 2463 2464 switch (af) { 2465 #ifdef INET 2466 case AF_INET: 2467 a += inaddr->addr32[0]; 2468 b += key->key32[1]; 2469 mix(a, b, c); 2470 hash->addr32[0] = c + key->key32[2]; 2471 break; 2472 #endif /* INET */ 2473 #ifdef INET6 2474 case AF_INET6: 2475 a += inaddr->addr32[0]; 2476 b += inaddr->addr32[2]; 2477 mix(a, b, c); 2478 
hash->addr32[0] = c; 2479 a += inaddr->addr32[1]; 2480 b += inaddr->addr32[3]; 2481 c += key->key32[1]; 2482 mix(a, b, c); 2483 hash->addr32[1] = c; 2484 a += inaddr->addr32[2]; 2485 b += inaddr->addr32[1]; 2486 c += key->key32[2]; 2487 mix(a, b, c); 2488 hash->addr32[2] = c; 2489 a += inaddr->addr32[3]; 2490 b += inaddr->addr32[0]; 2491 c += key->key32[3]; 2492 mix(a, b, c); 2493 hash->addr32[3] = c; 2494 break; 2495 #endif /* INET6 */ 2496 } 2497 } 2498 2499 int 2500 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, 2501 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn) 2502 { 2503 unsigned char hash[16]; 2504 struct pf_pool *rpool = &r->rpool; 2505 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr; 2506 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask; 2507 struct pf_pooladdr *acur = rpool->cur; 2508 struct pf_src_node k; 2509 int cpu = mycpu->gd_cpuid; 2510 2511 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR && 2512 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2513 k.af = af; 2514 PF_ACPY(&k.addr, saddr, af); 2515 if (r->rule_flag & PFRULE_RULESRCTRACK || 2516 r->rpool.opts & PF_POOL_STICKYADDR) 2517 k.rule.ptr = r; 2518 else 2519 k.rule.ptr = NULL; 2520 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; 2521 *sn = RB_FIND(pf_src_tree, &tree_src_tracking[cpu], &k); 2522 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) { 2523 PF_ACPY(naddr, &(*sn)->raddr, af); 2524 if (pf_status.debug >= PF_DEBUG_MISC) { 2525 kprintf("pf_map_addr: src tracking maps "); 2526 pf_print_host(&k.addr, 0, af); 2527 kprintf(" to "); 2528 pf_print_host(naddr, 0, af); 2529 kprintf("\n"); 2530 } 2531 return (0); 2532 } 2533 } 2534 2535 if (rpool->cur->addr.type == PF_ADDR_NOROUTE) 2536 return (1); 2537 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2538 switch (af) { 2539 #ifdef INET 2540 case AF_INET: 2541 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 && 2542 (rpool->opts & PF_POOL_TYPEMASK) != 2543 PF_POOL_ROUNDROBIN) 2544 return (1); 2545 raddr = &rpool->cur->addr.p.dyn->pfid_addr4; 2546 rmask = &rpool->cur->addr.p.dyn->pfid_mask4; 2547 break; 2548 #endif /* INET */ 2549 #ifdef INET6 2550 case AF_INET6: 2551 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 && 2552 (rpool->opts & PF_POOL_TYPEMASK) != 2553 PF_POOL_ROUNDROBIN) 2554 return (1); 2555 raddr = &rpool->cur->addr.p.dyn->pfid_addr6; 2556 rmask = &rpool->cur->addr.p.dyn->pfid_mask6; 2557 break; 2558 #endif /* INET6 */ 2559 } 2560 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2561 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) 2562 return (1); /* unsupported */ 2563 } else { 2564 raddr = &rpool->cur->addr.v.a.addr; 2565 rmask = &rpool->cur->addr.v.a.mask; 2566 } 2567 2568 switch (rpool->opts & PF_POOL_TYPEMASK) { 2569 case PF_POOL_NONE: 2570 PF_ACPY(naddr, raddr, af); 2571 break; 2572 case PF_POOL_BITMASK: 2573 PF_POOLMASK(naddr, raddr, rmask, saddr, af); 2574 break; 2575 case PF_POOL_RANDOM: 2576 if (init_addr != NULL && PF_AZERO(init_addr, af)) { 2577 switch (af) { 2578 #ifdef INET 2579 case AF_INET: 2580 rpool->counter.addr32[0] = htonl(karc4random()); 2581 break; 2582 #endif /* INET */ 2583 #ifdef INET6 2584 case AF_INET6: 2585 if (rmask->addr32[3] != 0xffffffff) 2586 rpool->counter.addr32[3] = 2587 htonl(karc4random()); 2588 else 2589 break; 2590 if (rmask->addr32[2] != 0xffffffff) 2591 rpool->counter.addr32[2] = 2592 htonl(karc4random()); 2593 else 2594 break; 2595 if (rmask->addr32[1] != 0xffffffff) 2596 rpool->counter.addr32[1] = 2597 htonl(karc4random()); 2598 else 2599 break; 
2600 if (rmask->addr32[0] != 0xffffffff) 2601 rpool->counter.addr32[0] = 2602 htonl(karc4random()); 2603 break; 2604 #endif /* INET6 */ 2605 } 2606 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af); 2607 PF_ACPY(init_addr, naddr, af); 2608 2609 } else { 2610 PF_AINC(&rpool->counter, af); 2611 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af); 2612 } 2613 break; 2614 case PF_POOL_SRCHASH: 2615 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af); 2616 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af); 2617 break; 2618 case PF_POOL_ROUNDROBIN: 2619 if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2620 if (!pfr_pool_get(rpool->cur->addr.p.tbl, 2621 &rpool->tblidx, &rpool->counter, 2622 &raddr, &rmask, af)) 2623 goto get_addr; 2624 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2625 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, 2626 &rpool->tblidx, &rpool->counter, 2627 &raddr, &rmask, af)) 2628 goto get_addr; 2629 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af)) 2630 goto get_addr; 2631 2632 try_next: 2633 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL) 2634 rpool->cur = TAILQ_FIRST(&rpool->list); 2635 if (rpool->cur->addr.type == PF_ADDR_TABLE) { 2636 rpool->tblidx = -1; 2637 if (pfr_pool_get(rpool->cur->addr.p.tbl, 2638 &rpool->tblidx, &rpool->counter, 2639 &raddr, &rmask, af)) { 2640 /* table contains no address of type 'af' */ 2641 if (rpool->cur != acur) 2642 goto try_next; 2643 return (1); 2644 } 2645 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { 2646 rpool->tblidx = -1; 2647 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, 2648 &rpool->tblidx, &rpool->counter, 2649 &raddr, &rmask, af)) { 2650 /* table contains no address of type 'af' */ 2651 if (rpool->cur != acur) 2652 goto try_next; 2653 return (1); 2654 } 2655 } else { 2656 raddr = &rpool->cur->addr.v.a.addr; 2657 rmask = &rpool->cur->addr.v.a.mask; 2658 PF_ACPY(&rpool->counter, raddr, af); 2659 } 2660 2661 get_addr: 2662 PF_ACPY(naddr, &rpool->counter, af); 2663 if (init_addr != NULL && PF_AZERO(init_addr, af)) 2664 PF_ACPY(init_addr, naddr, af); 2665 PF_AINC(&rpool->counter, af); 2666 break; 2667 } 2668 if (*sn != NULL) 2669 PF_ACPY(&(*sn)->raddr, naddr, af); 2670 2671 if (pf_status.debug >= PF_DEBUG_MISC && 2672 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { 2673 kprintf("pf_map_addr: selected address "); 2674 pf_print_host(naddr, 0, af); 2675 kprintf("\n"); 2676 } 2677 2678 return (0); 2679 } 2680 2681 int 2682 pf_get_sport(struct pf_pdesc *pd, sa_family_t af, 2683 u_int8_t proto, struct pf_rule *r, 2684 struct pf_addr *saddr, struct pf_addr *daddr, 2685 u_int16_t sport, u_int16_t dport, 2686 struct pf_addr *naddr, u_int16_t *nport, 2687 u_int16_t low, u_int16_t high, struct pf_src_node **sn) 2688 { 2689 struct pf_state_key_cmp key; 2690 struct pf_addr init_addr; 2691 u_int16_t cut; 2692 u_int32_t toeplitz_sport; 2693 2694 bzero(&init_addr, sizeof(init_addr)); 2695 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2696 return (1); 2697 2698 if (proto == IPPROTO_ICMP) { 2699 low = 1; 2700 high = 65535; 2701 } 2702 2703 bzero(&key, sizeof(key)); 2704 key.af = af; 2705 key.proto = proto; 2706 key.port[0] = dport; 2707 PF_ACPY(&key.addr[0], daddr, key.af); 2708 2709 do { 2710 PF_ACPY(&key.addr[1], naddr, key.af); 2711 2712 /* 2713 * We want to select a port that calculates to a toeplitz hash 2714 * that masks to the same cpu, otherwise the response may 2715 * not see the new state. 
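		 *
		 * (Concretely, the search loop below only keeps a
		 * candidate port tmp when
		 *
		 *	((toeplitz_piecemeal_port(htons(tmp)) ^
		 *	    toeplitz_sport) & ncpus2_mask) == 0
		 *
		 * i.e. when the rewritten tuple still hashes to this
		 * cpu under the ncpus2 mask; with N cpus that passes
		 * roughly 1/N of the port range.)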
2716 * 2717 * We can still do this even if the kernel is disregarding 2718 * the hash and vectoring the packets to a specific cpu, 2719 * but it will reduce the number of ports we can use. 2720 */ 2721 switch(af) { 2722 case AF_INET: 2723 toeplitz_sport = 2724 toeplitz_piecemeal_port(sport) ^ 2725 toeplitz_piecemeal_addr(saddr->v4.s_addr) ^ 2726 toeplitz_piecemeal_addr(naddr->v4.s_addr); 2727 break; 2728 case AF_INET6: 2729 /* XXX TODO XXX */ 2730 default: 2731 /* XXX TODO XXX */ 2732 toeplitz_sport = 0; 2733 break; 2734 } 2735 2736 /* 2737 * port search; start random, step; 2738 * similar 2 portloop in in_pcbbind 2739 * 2740 * WARNING! We try to match such that the kernel will 2741 * dispatch the translated host/port to the same 2742 * cpu, but this might not be possible. 2743 * 2744 * In the case where the port is fixed, or for the 2745 * UDP case (whos toeplitz does not incorporate the 2746 * port), we set not_cpu_localized which ultimately 2747 * causes the pf_state_tree element 2748 * 2749 * XXX fixed ports present a problem for cpu localization. 2750 */ 2751 if (!(proto == IPPROTO_TCP || 2752 proto == IPPROTO_UDP || 2753 proto == IPPROTO_ICMP)) { 2754 /* 2755 * non-specific protocol, leave port intact. 2756 */ 2757 key.port[1] = sport; 2758 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2759 *nport = sport; 2760 pd->not_cpu_localized = 1; 2761 return (0); 2762 } 2763 } else if (low == 0 && high == 0) { 2764 /* 2765 * static-port same as originator. 2766 */ 2767 key.port[1] = sport; 2768 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2769 *nport = sport; 2770 pd->not_cpu_localized = 1; 2771 return (0); 2772 } 2773 } else if (low == high) { 2774 /* 2775 * specific port as specified. 2776 */ 2777 key.port[1] = htons(low); 2778 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { 2779 *nport = htons(low); 2780 pd->not_cpu_localized = 1; 2781 return (0); 2782 } 2783 } else { 2784 /* 2785 * normal dynamic port 2786 */ 2787 u_int16_t tmp; 2788 2789 if (low > high) { 2790 tmp = low; 2791 low = high; 2792 high = tmp; 2793 } 2794 /* low < high */ 2795 cut = htonl(karc4random()) % (1 + high - low) + low; 2796 /* low <= cut <= high */ 2797 for (tmp = cut; tmp <= high; ++(tmp)) { 2798 key.port[1] = htons(tmp); 2799 if ((toeplitz_piecemeal_port(key.port[1]) ^ 2800 toeplitz_sport) & ncpus2_mask) { 2801 continue; 2802 } 2803 if (pf_find_state_all(&key, PF_IN, NULL) == 2804 NULL && !in_baddynamic(tmp, proto)) { 2805 if (proto == IPPROTO_UDP) 2806 pd->not_cpu_localized = 1; 2807 *nport = htons(tmp); 2808 return (0); 2809 } 2810 } 2811 for (tmp = cut - 1; tmp >= low; --(tmp)) { 2812 key.port[1] = htons(tmp); 2813 if ((toeplitz_piecemeal_port(key.port[1]) ^ 2814 toeplitz_sport) & ncpus2_mask) { 2815 continue; 2816 } 2817 if (pf_find_state_all(&key, PF_IN, NULL) == 2818 NULL && !in_baddynamic(tmp, proto)) { 2819 if (proto == IPPROTO_UDP) 2820 pd->not_cpu_localized = 1; 2821 *nport = htons(tmp); 2822 return (0); 2823 } 2824 } 2825 } 2826 2827 /* 2828 * Next address 2829 */ 2830 switch (r->rpool.opts & PF_POOL_TYPEMASK) { 2831 case PF_POOL_RANDOM: 2832 case PF_POOL_ROUNDROBIN: 2833 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) 2834 return (1); 2835 break; 2836 case PF_POOL_NONE: 2837 case PF_POOL_SRCHASH: 2838 case PF_POOL_BITMASK: 2839 default: 2840 return (1); 2841 } 2842 } while (! 
PF_AEQ(&init_addr, naddr, af) ); 2843 return (1); /* none available */ 2844 } 2845 2846 struct pf_rule * 2847 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off, 2848 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport, 2849 struct pf_addr *daddr, u_int16_t dport, int rs_num) 2850 { 2851 struct pf_rule *r, *rm = NULL; 2852 struct pf_ruleset *ruleset = NULL; 2853 int tag = -1; 2854 int rtableid = -1; 2855 int asd = 0; 2856 2857 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr); 2858 while (r && rm == NULL) { 2859 struct pf_rule_addr *src = NULL, *dst = NULL; 2860 struct pf_addr_wrap *xdst = NULL; 2861 2862 if (r->action == PF_BINAT && direction == PF_IN) { 2863 src = &r->dst; 2864 if (r->rpool.cur != NULL) 2865 xdst = &r->rpool.cur->addr; 2866 } else { 2867 src = &r->src; 2868 dst = &r->dst; 2869 } 2870 2871 r->evaluations++; 2872 if (pfi_kif_match(r->kif, kif) == r->ifnot) 2873 r = r->skip[PF_SKIP_IFP].ptr; 2874 else if (r->direction && r->direction != direction) 2875 r = r->skip[PF_SKIP_DIR].ptr; 2876 else if (r->af && r->af != pd->af) 2877 r = r->skip[PF_SKIP_AF].ptr; 2878 else if (r->proto && r->proto != pd->proto) 2879 r = r->skip[PF_SKIP_PROTO].ptr; 2880 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af, 2881 src->neg, kif)) 2882 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR : 2883 PF_SKIP_DST_ADDR].ptr; 2884 else if (src->port_op && !pf_match_port(src->port_op, 2885 src->port[0], src->port[1], sport)) 2886 r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT : 2887 PF_SKIP_DST_PORT].ptr; 2888 else if (dst != NULL && 2889 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL)) 2890 r = r->skip[PF_SKIP_DST_ADDR].ptr; 2891 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af, 2892 0, NULL)) 2893 r = TAILQ_NEXT(r, entries); 2894 else if (dst != NULL && dst->port_op && 2895 !pf_match_port(dst->port_op, dst->port[0], 2896 dst->port[1], dport)) 2897 r = r->skip[PF_SKIP_DST_PORT].ptr; 2898 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 2899 r = TAILQ_NEXT(r, entries); 2900 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto != 2901 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m, 2902 off, pd->hdr.tcp), r->os_fingerprint))) 2903 r = TAILQ_NEXT(r, entries); 2904 else { 2905 if (r->tag) 2906 tag = r->tag; 2907 if (r->rtableid >= 0) 2908 rtableid = r->rtableid; 2909 if (r->anchor == NULL) { 2910 rm = r; 2911 } else 2912 pf_step_into_anchor(&asd, &ruleset, rs_num, 2913 &r, NULL, NULL); 2914 } 2915 if (r == NULL) 2916 pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r, 2917 NULL, NULL); 2918 } 2919 if (pf_tag_packet(m, tag, rtableid)) 2920 return (NULL); 2921 if (rm != NULL && (rm->action == PF_NONAT || 2922 rm->action == PF_NORDR || rm->action == PF_NOBINAT)) 2923 return (NULL); 2924 return (rm); 2925 } 2926 2927 struct pf_rule * 2928 pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction, 2929 struct pfi_kif *kif, struct pf_src_node **sn, 2930 struct pf_state_key **skw, struct pf_state_key **sks, 2931 struct pf_state_key **skp, struct pf_state_key **nkp, 2932 struct pf_addr *saddr, struct pf_addr *daddr, 2933 u_int16_t sport, u_int16_t dport) 2934 { 2935 struct pf_rule *r = NULL; 2936 2937 if (direction == PF_OUT) { 2938 r = pf_match_translation(pd, m, off, direction, kif, saddr, 2939 sport, daddr, dport, PF_RULESET_BINAT); 2940 if (r == NULL) 2941 r = pf_match_translation(pd, m, off, direction, kif, 2942 saddr, sport, daddr, dport, PF_RULESET_NAT); 2943 } else { 2944 r = pf_match_translation(pd, m, off, 
direction, kif, saddr, 2945 sport, daddr, dport, PF_RULESET_RDR); 2946 if (r == NULL) 2947 r = pf_match_translation(pd, m, off, direction, kif, 2948 saddr, sport, daddr, dport, PF_RULESET_BINAT); 2949 } 2950 2951 if (r != NULL) { 2952 struct pf_addr *naddr; 2953 u_int16_t *nport; 2954 2955 if (pf_state_key_setup(pd, r, skw, sks, skp, nkp, 2956 saddr, daddr, sport, dport)) 2957 return r; 2958 2959 /* XXX We only modify one side for now. */ 2960 naddr = &(*nkp)->addr[1]; 2961 nport = &(*nkp)->port[1]; 2962 2963 /* 2964 * NOTE: Currently all translations will clear 2965 * BRIDGE_MBUF_TAGGED, telling the bridge to 2966 * ignore the original input encapsulation. 2967 */ 2968 switch (r->action) { 2969 case PF_NONAT: 2970 case PF_NOBINAT: 2971 case PF_NORDR: 2972 return (NULL); 2973 case PF_NAT: 2974 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2975 if (pf_get_sport(pd, pd->af, pd->proto, r, 2976 saddr, daddr, sport, dport, 2977 naddr, nport, r->rpool.proxy_port[0], 2978 r->rpool.proxy_port[1], sn)) { 2979 DPFPRINTF(PF_DEBUG_MISC, 2980 ("pf: NAT proxy port allocation " 2981 "(%u-%u) failed\n", 2982 r->rpool.proxy_port[0], 2983 r->rpool.proxy_port[1])); 2984 return (NULL); 2985 } 2986 break; 2987 case PF_BINAT: 2988 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 2989 switch (direction) { 2990 case PF_OUT: 2991 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){ 2992 switch (pd->af) { 2993 #ifdef INET 2994 case AF_INET: 2995 if (r->rpool.cur->addr.p.dyn-> 2996 pfid_acnt4 < 1) 2997 return (NULL); 2998 PF_POOLMASK(naddr, 2999 &r->rpool.cur->addr.p.dyn-> 3000 pfid_addr4, 3001 &r->rpool.cur->addr.p.dyn-> 3002 pfid_mask4, 3003 saddr, AF_INET); 3004 break; 3005 #endif /* INET */ 3006 #ifdef INET6 3007 case AF_INET6: 3008 if (r->rpool.cur->addr.p.dyn-> 3009 pfid_acnt6 < 1) 3010 return (NULL); 3011 PF_POOLMASK(naddr, 3012 &r->rpool.cur->addr.p.dyn-> 3013 pfid_addr6, 3014 &r->rpool.cur->addr.p.dyn-> 3015 pfid_mask6, 3016 saddr, AF_INET6); 3017 break; 3018 #endif /* INET6 */ 3019 } 3020 } else 3021 PF_POOLMASK(naddr, 3022 &r->rpool.cur->addr.v.a.addr, 3023 &r->rpool.cur->addr.v.a.mask, 3024 saddr, pd->af); 3025 break; 3026 case PF_IN: 3027 if (r->src.addr.type == PF_ADDR_DYNIFTL) { 3028 switch (pd->af) { 3029 #ifdef INET 3030 case AF_INET: 3031 if (r->src.addr.p.dyn-> 3032 pfid_acnt4 < 1) 3033 return (NULL); 3034 PF_POOLMASK(naddr, 3035 &r->src.addr.p.dyn-> 3036 pfid_addr4, 3037 &r->src.addr.p.dyn-> 3038 pfid_mask4, 3039 daddr, AF_INET); 3040 break; 3041 #endif /* INET */ 3042 #ifdef INET6 3043 case AF_INET6: 3044 if (r->src.addr.p.dyn-> 3045 pfid_acnt6 < 1) 3046 return (NULL); 3047 PF_POOLMASK(naddr, 3048 &r->src.addr.p.dyn-> 3049 pfid_addr6, 3050 &r->src.addr.p.dyn-> 3051 pfid_mask6, 3052 daddr, AF_INET6); 3053 break; 3054 #endif /* INET6 */ 3055 } 3056 } else 3057 PF_POOLMASK(naddr, 3058 &r->src.addr.v.a.addr, 3059 &r->src.addr.v.a.mask, daddr, 3060 pd->af); 3061 break; 3062 } 3063 break; 3064 case PF_RDR: { 3065 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 3066 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn)) 3067 return (NULL); 3068 if ((r->rpool.opts & PF_POOL_TYPEMASK) == 3069 PF_POOL_BITMASK) 3070 PF_POOLMASK(naddr, naddr, 3071 &r->rpool.cur->addr.v.a.mask, daddr, 3072 pd->af); 3073 3074 if (r->rpool.proxy_port[1]) { 3075 u_int32_t tmp_nport; 3076 3077 tmp_nport = ((ntohs(dport) - 3078 ntohs(r->dst.port[0])) % 3079 (r->rpool.proxy_port[1] - 3080 r->rpool.proxy_port[0] + 1)) + 3081 r->rpool.proxy_port[0]; 3082 3083 /* wrap around if necessary */ 3084 if (tmp_nport > 65535) 3085 tmp_nport -= 65535; 3086 
*nport = htons((u_int16_t)tmp_nport); 3087 } else if (r->rpool.proxy_port[0]) 3088 *nport = htons(r->rpool.proxy_port[0]); 3089 break; 3090 } 3091 default: 3092 return (NULL); 3093 } 3094 } 3095 3096 return (r); 3097 } 3098 3099 struct netmsg_hashlookup { 3100 struct netmsg_base base; 3101 struct inpcb **nm_pinp; 3102 struct inpcbinfo *nm_pcbinfo; 3103 struct pf_addr *nm_saddr; 3104 struct pf_addr *nm_daddr; 3105 uint16_t nm_sport; 3106 uint16_t nm_dport; 3107 sa_family_t nm_af; 3108 }; 3109 3110 #ifdef PF_SOCKET_LOOKUP_DOMSG 3111 static void 3112 in_pcblookup_hash_handler(netmsg_t msg) 3113 { 3114 struct netmsg_hashlookup *rmsg = (struct netmsg_hashlookup *)msg; 3115 3116 if (rmsg->nm_af == AF_INET) 3117 *rmsg->nm_pinp = in_pcblookup_hash(rmsg->nm_pcbinfo, 3118 rmsg->nm_saddr->v4, rmsg->nm_sport, rmsg->nm_daddr->v4, 3119 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 3120 #ifdef INET6 3121 else 3122 *rmsg->nm_pinp = in6_pcblookup_hash(rmsg->nm_pcbinfo, 3123 &rmsg->nm_saddr->v6, rmsg->nm_sport, &rmsg->nm_daddr->v6, 3124 rmsg->nm_dport, INPLOOKUP_WILDCARD, NULL); 3125 #endif /* INET6 */ 3126 lwkt_replymsg(&rmsg->base.lmsg, 0); 3127 } 3128 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 3129 3130 int 3131 pf_socket_lookup(int direction, struct pf_pdesc *pd) 3132 { 3133 struct pf_addr *saddr, *daddr; 3134 u_int16_t sport, dport; 3135 struct inpcbinfo *pi; 3136 struct inpcb *inp; 3137 struct netmsg_hashlookup *msg = NULL; 3138 #ifdef PF_SOCKET_LOOKUP_DOMSG 3139 struct netmsg_hashlookup msg0; 3140 #endif 3141 int pi_cpu = 0; 3142 3143 if (pd == NULL) 3144 return (-1); 3145 pd->lookup.uid = UID_MAX; 3146 pd->lookup.gid = GID_MAX; 3147 pd->lookup.pid = NO_PID; 3148 if (direction == PF_IN) { 3149 saddr = pd->src; 3150 daddr = pd->dst; 3151 } else { 3152 saddr = pd->dst; 3153 daddr = pd->src; 3154 } 3155 switch (pd->proto) { 3156 case IPPROTO_TCP: 3157 if (pd->hdr.tcp == NULL) 3158 return (-1); 3159 sport = pd->hdr.tcp->th_sport; 3160 dport = pd->hdr.tcp->th_dport; 3161 3162 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport); 3163 pi = &tcbinfo[pi_cpu]; 3164 /* 3165 * Our netstack runs lockless on MP systems 3166 * (only for TCP connections at the moment). 3167 * 3168 * As we are not allowed to read another CPU's tcbinfo, 3169 * we have to ask that CPU via remote call to search the 3170 * table for us. 3171 * 3172 * Prepare a msg iff data belongs to another CPU. 3173 */ 3174 if (pi_cpu != mycpu->gd_cpuid) { 3175 #ifdef PF_SOCKET_LOOKUP_DOMSG 3176 /* 3177 * NOTE: 3178 * 3179 * Following lwkt_domsg() is dangerous and could 3180 * lockup the network system, e.g. 3181 * 3182 * On 2 CPU system: 3183 * netisr0 domsg to netisr1 (due to lookup) 3184 * netisr1 domsg to netisr0 (due to lookup) 3185 * 3186 * We simply return -1 here, since we are probably 3187 * called before NAT, so the TCP packet should 3188 * already be on the correct CPU. 
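			 *
			 * A concrete interleaving of that lockup
			 * (illustrative):
			 *
			 *	netisr0: lwkt_domsg() -> netisr1,
			 *		 blocks awaiting reply
			 *	netisr1: lwkt_domsg() -> netisr0,
			 *		 blocks awaiting reply
			 *
			 * Neither thread is back in its message loop,
			 * so neither request is ever serviced.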
3189 */ 3190 msg = &msg0; 3191 netmsg_init(&msg->base, NULL, &curthread->td_msgport, 3192 0, in_pcblookup_hash_handler); 3193 msg->nm_pinp = &inp; 3194 msg->nm_pcbinfo = pi; 3195 msg->nm_saddr = saddr; 3196 msg->nm_sport = sport; 3197 msg->nm_daddr = daddr; 3198 msg->nm_dport = dport; 3199 msg->nm_af = pd->af; 3200 #else /* !PF_SOCKET_LOOKUP_DOMSG */ 3201 kprintf("pf_socket_lookup: tcp packet not on the " 3202 "correct cpu %d, cur cpu %d\n", 3203 pi_cpu, mycpuid); 3204 print_backtrace(-1); 3205 return -1; 3206 #endif /* PF_SOCKET_LOOKUP_DOMSG */ 3207 } 3208 break; 3209 case IPPROTO_UDP: 3210 if (pd->hdr.udp == NULL) 3211 return (-1); 3212 sport = pd->hdr.udp->uh_sport; 3213 dport = pd->hdr.udp->uh_dport; 3214 pi = &udbinfo; 3215 break; 3216 default: 3217 return (-1); 3218 } 3219 if (direction != PF_IN) { 3220 u_int16_t p; 3221 3222 p = sport; 3223 sport = dport; 3224 dport = p; 3225 } 3226 switch (pd->af) { 3227 #ifdef INET6 3228 case AF_INET6: 3229 /* 3230 * Query other CPU, second part 3231 * 3232 * msg only gets initialized when: 3233 * 1) packet is TCP 3234 * 2) the info belongs to another CPU 3235 * 3236 * Use some switch/case magic to avoid code duplication. 3237 */ 3238 if (msg == NULL) { 3239 inp = in6_pcblookup_hash(pi, &saddr->v6, sport, 3240 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL); 3241 3242 if (inp == NULL) 3243 return (-1); 3244 break; 3245 } 3246 /* FALLTHROUGH if SMP and on other CPU */ 3247 #endif /* INET6 */ 3248 case AF_INET: 3249 if (msg != NULL) { 3250 lwkt_domsg(netisr_cpuport(pi_cpu), 3251 &msg->base.lmsg, 0); 3252 } else 3253 { 3254 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4, 3255 dport, INPLOOKUP_WILDCARD, NULL); 3256 } 3257 if (inp == NULL) 3258 return (-1); 3259 break; 3260 3261 default: 3262 return (-1); 3263 } 3264 pd->lookup.uid = inp->inp_socket->so_cred->cr_uid; 3265 pd->lookup.gid = inp->inp_socket->so_cred->cr_groups[0]; 3266 return (1); 3267 } 3268 3269 u_int8_t 3270 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3271 { 3272 int hlen; 3273 u_int8_t hdr[60]; 3274 u_int8_t *opt, optlen; 3275 u_int8_t wscale = 0; 3276 3277 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3278 if (hlen <= sizeof(struct tcphdr)) 3279 return (0); 3280 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3281 return (0); 3282 opt = hdr + sizeof(struct tcphdr); 3283 hlen -= sizeof(struct tcphdr); 3284 while (hlen >= 3) { 3285 switch (*opt) { 3286 case TCPOPT_EOL: 3287 case TCPOPT_NOP: 3288 ++opt; 3289 --hlen; 3290 break; 3291 case TCPOPT_WINDOW: 3292 wscale = opt[2]; 3293 if (wscale > TCP_MAX_WINSHIFT) 3294 wscale = TCP_MAX_WINSHIFT; 3295 wscale |= PF_WSCALE_FLAG; 3296 /* FALLTHROUGH */ 3297 default: 3298 optlen = opt[1]; 3299 if (optlen < 2) 3300 optlen = 2; 3301 hlen -= optlen; 3302 opt += optlen; 3303 break; 3304 } 3305 } 3306 return (wscale); 3307 } 3308 3309 u_int16_t 3310 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 3311 { 3312 int hlen; 3313 u_int8_t hdr[60]; 3314 u_int8_t *opt, optlen; 3315 u_int16_t mss = tcp_mssdflt; 3316 3317 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 3318 if (hlen <= sizeof(struct tcphdr)) 3319 return (0); 3320 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 3321 return (0); 3322 opt = hdr + sizeof(struct tcphdr); 3323 hlen -= sizeof(struct tcphdr); 3324 while (hlen >= TCPOLEN_MAXSEG) { 3325 switch (*opt) { 3326 case TCPOPT_EOL: 3327 case TCPOPT_NOP: 3328 ++opt; 3329 --hlen; 3330 break; 3331 case TCPOPT_MAXSEG: 3332 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); 3333 /* FALLTHROUGH 
*/ 3334 default: 3335 optlen = opt[1]; 3336 if (optlen < 2) 3337 optlen = 2; 3338 hlen -= optlen; 3339 opt += optlen; 3340 break; 3341 } 3342 } 3343 return (mss); 3344 } 3345 3346 u_int16_t 3347 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) 3348 { 3349 #ifdef INET 3350 struct sockaddr_in *dst; 3351 struct route ro; 3352 #endif /* INET */ 3353 #ifdef INET6 3354 struct sockaddr_in6 *dst6; 3355 struct route_in6 ro6; 3356 #endif /* INET6 */ 3357 struct rtentry *rt = NULL; 3358 int hlen = 0; 3359 u_int16_t mss = tcp_mssdflt; 3360 3361 switch (af) { 3362 #ifdef INET 3363 case AF_INET: 3364 hlen = sizeof(struct ip); 3365 bzero(&ro, sizeof(ro)); 3366 dst = (struct sockaddr_in *)&ro.ro_dst; 3367 dst->sin_family = AF_INET; 3368 dst->sin_len = sizeof(*dst); 3369 dst->sin_addr = addr->v4; 3370 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING)); 3371 rt = ro.ro_rt; 3372 break; 3373 #endif /* INET */ 3374 #ifdef INET6 3375 case AF_INET6: 3376 hlen = sizeof(struct ip6_hdr); 3377 bzero(&ro6, sizeof(ro6)); 3378 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst; 3379 dst6->sin6_family = AF_INET6; 3380 dst6->sin6_len = sizeof(*dst6); 3381 dst6->sin6_addr = addr->v6; 3382 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING)); 3383 rt = ro6.ro_rt; 3384 break; 3385 #endif /* INET6 */ 3386 } 3387 3388 if (rt && rt->rt_ifp) { 3389 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr); 3390 mss = max(tcp_mssdflt, mss); 3391 RTFREE(rt); 3392 } 3393 mss = min(mss, offer); 3394 mss = max(mss, 64); /* sanity - at least max opt space */ 3395 return (mss); 3396 } 3397 3398 void 3399 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr) 3400 { 3401 struct pf_rule *r = s->rule.ptr; 3402 3403 s->rt_kif = NULL; 3404 if (!r->rt || r->rt == PF_FASTROUTE) 3405 return; 3406 switch (s->key[PF_SK_WIRE]->af) { 3407 #ifdef INET 3408 case AF_INET: 3409 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, 3410 &s->nat_src_node); 3411 s->rt_kif = r->rpool.cur->kif; 3412 break; 3413 #endif /* INET */ 3414 #ifdef INET6 3415 case AF_INET6: 3416 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, 3417 &s->nat_src_node); 3418 s->rt_kif = r->rpool.cur->kif; 3419 break; 3420 #endif /* INET6 */ 3421 } 3422 } 3423 3424 u_int32_t 3425 pf_tcp_iss(struct pf_pdesc *pd) 3426 { 3427 MD5_CTX ctx; 3428 u_int32_t digest[4]; 3429 3430 if (pf_tcp_secret_init == 0) { 3431 lwkt_gettoken(&pf_gtoken); 3432 if (pf_tcp_secret_init == 0) { 3433 karc4rand(pf_tcp_secret, sizeof(pf_tcp_secret)); 3434 MD5Init(&pf_tcp_secret_ctx); 3435 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret, 3436 sizeof(pf_tcp_secret)); 3437 pf_tcp_secret_init = 1; 3438 } 3439 lwkt_reltoken(&pf_gtoken); 3440 } 3441 ctx = pf_tcp_secret_ctx; 3442 3443 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short)); 3444 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short)); 3445 if (pd->af == AF_INET6) { 3446 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr)); 3447 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr)); 3448 } else { 3449 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr)); 3450 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr)); 3451 } 3452 MD5Final((u_char *)digest, &ctx); 3453 pf_tcp_iss_off += 4096; 3454 3455 return (digest[0] + pd->hdr.tcp->th_seq + pf_tcp_iss_off); 3456 } 3457 3458 int 3459 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, 3460 struct pfi_kif *kif, struct mbuf *m, int off, void *h, 3461 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm, 3462 
struct ifqueue *ifq, struct inpcb *inp) 3463 { 3464 struct pf_rule *nr = NULL; 3465 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 3466 sa_family_t af = pd->af; 3467 struct pf_rule *r, *a = NULL; 3468 struct pf_ruleset *ruleset = NULL; 3469 struct pf_src_node *nsn = NULL; 3470 struct tcphdr *th = pd->hdr.tcp; 3471 struct pf_state_key *skw = NULL, *sks = NULL; 3472 struct pf_state_key *sk = NULL, *nk = NULL; 3473 u_short reason; 3474 int rewrite = 0, hdrlen = 0; 3475 int tag = -1, rtableid = -1; 3476 int asd = 0; 3477 int match = 0; 3478 int state_icmp = 0; 3479 u_int16_t sport = 0, dport = 0; 3480 u_int16_t bproto_sum = 0, bip_sum = 0; 3481 u_int8_t icmptype = 0, icmpcode = 0; 3482 3483 3484 if (direction == PF_IN && pf_check_congestion(ifq)) { 3485 REASON_SET(&reason, PFRES_CONGEST); 3486 return (PF_DROP); 3487 } 3488 3489 if (inp != NULL) 3490 pd->lookup.done = pf_socket_lookup(direction, pd); 3491 else if (debug_pfugidhack) { 3492 DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n")); 3493 pd->lookup.done = pf_socket_lookup(direction, pd); 3494 } 3495 3496 switch (pd->proto) { 3497 case IPPROTO_TCP: 3498 sport = th->th_sport; 3499 dport = th->th_dport; 3500 hdrlen = sizeof(*th); 3501 break; 3502 case IPPROTO_UDP: 3503 sport = pd->hdr.udp->uh_sport; 3504 dport = pd->hdr.udp->uh_dport; 3505 hdrlen = sizeof(*pd->hdr.udp); 3506 break; 3507 #ifdef INET 3508 case IPPROTO_ICMP: 3509 if (pd->af != AF_INET) 3510 break; 3511 sport = dport = pd->hdr.icmp->icmp_id; 3512 hdrlen = sizeof(*pd->hdr.icmp); 3513 icmptype = pd->hdr.icmp->icmp_type; 3514 icmpcode = pd->hdr.icmp->icmp_code; 3515 3516 if (icmptype == ICMP_UNREACH || 3517 icmptype == ICMP_SOURCEQUENCH || 3518 icmptype == ICMP_REDIRECT || 3519 icmptype == ICMP_TIMXCEED || 3520 icmptype == ICMP_PARAMPROB) 3521 state_icmp++; 3522 break; 3523 #endif /* INET */ 3524 #ifdef INET6 3525 case IPPROTO_ICMPV6: 3526 if (af != AF_INET6) 3527 break; 3528 sport = dport = pd->hdr.icmp6->icmp6_id; 3529 hdrlen = sizeof(*pd->hdr.icmp6); 3530 icmptype = pd->hdr.icmp6->icmp6_type; 3531 icmpcode = pd->hdr.icmp6->icmp6_code; 3532 3533 if (icmptype == ICMP6_DST_UNREACH || 3534 icmptype == ICMP6_PACKET_TOO_BIG || 3535 icmptype == ICMP6_TIME_EXCEEDED || 3536 icmptype == ICMP6_PARAM_PROB) 3537 state_icmp++; 3538 break; 3539 #endif /* INET6 */ 3540 default: 3541 sport = dport = hdrlen = 0; 3542 break; 3543 } 3544 3545 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3546 3547 /* check packet for BINAT/NAT/RDR */ 3548 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, 3549 &skw, &sks, &sk, &nk, saddr, daddr, sport, dport)) != NULL) { 3550 if (nk == NULL || sk == NULL) { 3551 REASON_SET(&reason, PFRES_MEMORY); 3552 goto cleanup; 3553 } 3554 3555 if (pd->ip_sum) 3556 bip_sum = *pd->ip_sum; 3557 3558 m->m_flags &= ~M_HASH; 3559 switch (pd->proto) { 3560 case IPPROTO_TCP: 3561 bproto_sum = th->th_sum; 3562 pd->proto_sum = &th->th_sum; 3563 3564 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3565 nk->port[pd->sidx] != sport) { 3566 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 3567 &th->th_sum, &nk->addr[pd->sidx], 3568 nk->port[pd->sidx], 0, af); 3569 pd->sport = &th->th_sport; 3570 sport = th->th_sport; 3571 } 3572 3573 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3574 nk->port[pd->didx] != dport) { 3575 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 3576 &th->th_sum, &nk->addr[pd->didx], 3577 nk->port[pd->didx], 0, af); 3578 dport = th->th_dport; 3579 pd->dport = &th->th_dport; 3580 } 3581 rewrite++; 3582 break; 3583 case IPPROTO_UDP: 
3584			bproto_sum = pd->hdr.udp->uh_sum;
3585			pd->proto_sum = &pd->hdr.udp->uh_sum;
3586
3587			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3588			    nk->port[pd->sidx] != sport) {
3589				pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3590				    pd->ip_sum, &pd->hdr.udp->uh_sum,
3591				    &nk->addr[pd->sidx],
3592				    nk->port[pd->sidx], 1, af);
3593				sport = pd->hdr.udp->uh_sport;
3594				pd->sport = &pd->hdr.udp->uh_sport;
3595			}
3596
3597			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3598			    nk->port[pd->didx] != dport) {
3599				pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3600				    pd->ip_sum, &pd->hdr.udp->uh_sum,
3601				    &nk->addr[pd->didx],
3602				    nk->port[pd->didx], 1, af);
3603				dport = pd->hdr.udp->uh_dport;
3604				pd->dport = &pd->hdr.udp->uh_dport;
3605			}
3606			rewrite++;
3607			break;
3608	#ifdef INET
3609		case IPPROTO_ICMP:
3610			nk->port[0] = nk->port[1];
3611			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3612				pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3613				    nk->addr[pd->sidx].v4.s_addr, 0);
3614
3615			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3616				pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3617				    nk->addr[pd->didx].v4.s_addr, 0);
3618
3619			if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3620				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3621				    pd->hdr.icmp->icmp_cksum, sport,
3622				    nk->port[1], 0);
3623				pd->hdr.icmp->icmp_id = nk->port[1];
3624				pd->sport = &pd->hdr.icmp->icmp_id;
3625			}
3626			m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3627			break;
3628	#endif /* INET */
3629	#ifdef INET6
3630		case IPPROTO_ICMPV6:
3631			nk->port[0] = nk->port[1];
3632			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3633				pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3634				    &nk->addr[pd->sidx], 0);
3635
3636			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3637				pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3638				    &nk->addr[pd->didx], 0);
3639			rewrite++;
3640			break;
3641	#endif /* INET6 */
3642		default:
3643			switch (af) {
3644	#ifdef INET
3645			case AF_INET:
3646				if (PF_ANEQ(saddr,
3647				    &nk->addr[pd->sidx], AF_INET))
3648					pf_change_a(&saddr->v4.s_addr,
3649					    pd->ip_sum,
3650					    nk->addr[pd->sidx].v4.s_addr, 0);
3651
3652				if (PF_ANEQ(daddr,
3653				    &nk->addr[pd->didx], AF_INET))
3654					pf_change_a(&daddr->v4.s_addr,
3655					    pd->ip_sum,
3656					    nk->addr[pd->didx].v4.s_addr, 0);
3657				break;
3658	#endif /* INET */
3659	#ifdef INET6
3660			case AF_INET6:
3661				if (PF_ANEQ(saddr,
3662				    &nk->addr[pd->sidx], AF_INET6))
3663					PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3664
3665				if (PF_ANEQ(daddr,
3666				    &nk->addr[pd->didx], AF_INET6))
3667					PF_ACPY(daddr, &nk->addr[pd->didx], af);
3668				break;
3669	#endif /* INET6 */
3670			}
3671			break;
3672		}
3673		if (nr->natpass)
3674			r = NULL;
3675		pd->nat_rule = nr;
3676	}
3677
3678	while (r != NULL) {
3679		r->evaluations++;
3680		if (pfi_kif_match(r->kif, kif) == r->ifnot)
3681			r = r->skip[PF_SKIP_IFP].ptr;
3682		else if (r->direction && r->direction != direction)
3683			r = r->skip[PF_SKIP_DIR].ptr;
3684		else if (r->af && r->af != af)
3685			r = r->skip[PF_SKIP_AF].ptr;
3686		else if (r->proto && r->proto != pd->proto)
3687			r = r->skip[PF_SKIP_PROTO].ptr;
3688		else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3689		    r->src.neg, kif))
3690			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3691		/* tcp/udp only. port_op always 0 in other cases */
3692		else if (r->src.port_op && !pf_match_port(r->src.port_op,
3693		    r->src.port[0], r->src.port[1], sport))
3694			r = r->skip[PF_SKIP_SRC_PORT].ptr;
3695		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3696		    r->dst.neg, NULL))
3697			r = r->skip[PF_SKIP_DST_ADDR].ptr;
3698		/* tcp/udp only.
port_op always 0 in other cases */ 3699 else if (r->dst.port_op && !pf_match_port(r->dst.port_op, 3700 r->dst.port[0], r->dst.port[1], dport)) 3701 r = r->skip[PF_SKIP_DST_PORT].ptr; 3702 /* icmp only. type always 0 in other cases */ 3703 else if (r->type && r->type != icmptype + 1) 3704 r = TAILQ_NEXT(r, entries); 3705 /* icmp only. type always 0 in other cases */ 3706 else if (r->code && r->code != icmpcode + 1) 3707 r = TAILQ_NEXT(r, entries); 3708 else if (r->tos && !(r->tos == pd->tos)) 3709 r = TAILQ_NEXT(r, entries); 3710 else if (r->rule_flag & PFRULE_FRAGMENT) 3711 r = TAILQ_NEXT(r, entries); 3712 else if (pd->proto == IPPROTO_TCP && 3713 (r->flagset & th->th_flags) != r->flags) 3714 r = TAILQ_NEXT(r, entries); 3715 /* tcp/udp only. uid.op always 0 in other cases */ 3716 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done = 3717 pf_socket_lookup(direction, pd), 1)) && 3718 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1], 3719 pd->lookup.uid)) 3720 r = TAILQ_NEXT(r, entries); 3721 /* tcp/udp only. gid.op always 0 in other cases */ 3722 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done = 3723 pf_socket_lookup(direction, pd), 1)) && 3724 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], 3725 pd->lookup.gid)) 3726 r = TAILQ_NEXT(r, entries); 3727 else if (r->prob && 3728 r->prob <= karc4random()) 3729 r = TAILQ_NEXT(r, entries); 3730 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 3731 r = TAILQ_NEXT(r, entries); 3732 else if (r->os_fingerprint != PF_OSFP_ANY && 3733 (pd->proto != IPPROTO_TCP || !pf_osfp_match( 3734 pf_osfp_fingerprint(pd, m, off, th), 3735 r->os_fingerprint))) 3736 r = TAILQ_NEXT(r, entries); 3737 else { 3738 if (r->tag) 3739 tag = r->tag; 3740 if (r->rtableid >= 0) 3741 rtableid = r->rtableid; 3742 if (r->anchor == NULL) { 3743 match = 1; 3744 *rm = r; 3745 *am = a; 3746 *rsm = ruleset; 3747 if ((*rm)->quick) 3748 break; 3749 r = TAILQ_NEXT(r, entries); 3750 } else 3751 pf_step_into_anchor(&asd, &ruleset, 3752 PF_RULESET_FILTER, &r, &a, &match); 3753 } 3754 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 3755 PF_RULESET_FILTER, &r, &a, &match)) 3756 break; 3757 } 3758 r = *rm; 3759 a = *am; 3760 ruleset = *rsm; 3761 3762 REASON_SET(&reason, PFRES_MATCH); 3763 3764 if (r->log || (nr != NULL && nr->log)) { 3765 if (rewrite) 3766 m_copyback(m, off, hdrlen, pd->hdr.any); 3767 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? 
r : nr, 3768 a, ruleset, pd); 3769 } 3770 3771 if ((r->action == PF_DROP) && 3772 ((r->rule_flag & PFRULE_RETURNRST) || 3773 (r->rule_flag & PFRULE_RETURNICMP) || 3774 (r->rule_flag & PFRULE_RETURN))) { 3775 /* undo NAT changes, if they have taken place */ 3776 if (nr != NULL) { 3777 PF_ACPY(saddr, &sk->addr[pd->sidx], af); 3778 PF_ACPY(daddr, &sk->addr[pd->didx], af); 3779 if (pd->sport) 3780 *pd->sport = sk->port[pd->sidx]; 3781 if (pd->dport) 3782 *pd->dport = sk->port[pd->didx]; 3783 if (pd->proto_sum) 3784 *pd->proto_sum = bproto_sum; 3785 if (pd->ip_sum) 3786 *pd->ip_sum = bip_sum; 3787 m_copyback(m, off, hdrlen, pd->hdr.any); 3788 } 3789 if (pd->proto == IPPROTO_TCP && 3790 ((r->rule_flag & PFRULE_RETURNRST) || 3791 (r->rule_flag & PFRULE_RETURN)) && 3792 !(th->th_flags & TH_RST)) { 3793 u_int32_t ack = ntohl(th->th_seq) + pd->p_len; 3794 int len = 0; 3795 struct ip *h4; 3796 #ifdef INET6 3797 struct ip6_hdr *h6; 3798 #endif 3799 switch (af) { 3800 case AF_INET: 3801 h4 = mtod(m, struct ip *); 3802 len = h4->ip_len - off; 3803 break; 3804 #ifdef INET6 3805 case AF_INET6: 3806 h6 = mtod(m, struct ip6_hdr *); 3807 len = h6->ip6_plen - (off - sizeof(*h6)); 3808 break; 3809 #endif 3810 } 3811 3812 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af)) 3813 REASON_SET(&reason, PFRES_PROTCKSUM); 3814 else { 3815 if (th->th_flags & TH_SYN) 3816 ack++; 3817 if (th->th_flags & TH_FIN) 3818 ack++; 3819 pf_send_tcp(r, af, pd->dst, 3820 pd->src, th->th_dport, th->th_sport, 3821 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, 3822 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp); 3823 } 3824 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET && 3825 r->return_icmp) 3826 pf_send_icmp(m, r->return_icmp >> 8, 3827 r->return_icmp & 255, af, r); 3828 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && 3829 r->return_icmp6) 3830 pf_send_icmp(m, r->return_icmp6 >> 8, 3831 r->return_icmp6 & 255, af, r); 3832 } 3833 3834 if (r->action == PF_DROP) 3835 goto cleanup; 3836 3837 if (pf_tag_packet(m, tag, rtableid)) { 3838 REASON_SET(&reason, PFRES_MEMORY); 3839 goto cleanup; 3840 } 3841 3842 if (!state_icmp && (r->keep_state || nr != NULL || 3843 (pd->flags & PFDESC_TCP_NORM))) { 3844 int action; 3845 action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m, 3846 off, sport, dport, &rewrite, kif, sm, tag, bproto_sum, 3847 bip_sum, hdrlen); 3848 if (action != PF_PASS) 3849 return (action); 3850 } 3851 3852 /* copy back packet headers if we performed NAT operations */ 3853 if (rewrite) 3854 m_copyback(m, off, hdrlen, pd->hdr.any); 3855 3856 return (PF_PASS); 3857 3858 cleanup: 3859 if (sk != NULL) 3860 kfree(sk, M_PFSTATEKEYPL); 3861 if (nk != NULL) 3862 kfree(nk, M_PFSTATEKEYPL); 3863 return (PF_DROP); 3864 } 3865 3866 static __inline int 3867 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, 3868 struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *skw, 3869 struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk, 3870 struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite, 3871 struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum, 3872 u_int16_t bip_sum, int hdrlen) 3873 { 3874 struct pf_state *s = NULL; 3875 struct pf_src_node *sn = NULL; 3876 struct tcphdr *th = pd->hdr.tcp; 3877 u_int16_t mss = tcp_mssdflt; 3878 u_short reason; 3879 int cpu = mycpu->gd_cpuid; 3880 3881 /* check maximums */ 3882 if (r->max_states && (r->states_cur >= r->max_states)) { 3883 pf_status.lcounters[LCNT_STATES]++; 3884 
REASON_SET(&reason, PFRES_MAXSTATES); 3885 return (PF_DROP); 3886 } 3887 /* src node for filter rule */ 3888 if ((r->rule_flag & PFRULE_SRCTRACK || 3889 r->rpool.opts & PF_POOL_STICKYADDR) && 3890 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) { 3891 REASON_SET(&reason, PFRES_SRCLIMIT); 3892 goto csfailed; 3893 } 3894 /* src node for translation rule */ 3895 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) && 3896 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) { 3897 REASON_SET(&reason, PFRES_SRCLIMIT); 3898 goto csfailed; 3899 } 3900 s = kmalloc(sizeof(struct pf_state), M_PFSTATEPL, M_NOWAIT|M_ZERO); 3901 if (s == NULL) { 3902 REASON_SET(&reason, PFRES_MEMORY); 3903 goto csfailed; 3904 } 3905 lockinit(&s->lk, "pfstlk", 0, 0); 3906 s->id = 0; /* XXX Do we really need that? not in OpenBSD */ 3907 s->creatorid = 0; 3908 s->rule.ptr = r; 3909 s->nat_rule.ptr = nr; 3910 s->anchor.ptr = a; 3911 s->state_flags = PFSTATE_CREATEINPROG; 3912 STATE_INC_COUNTERS(s); 3913 if (r->allow_opts) 3914 s->state_flags |= PFSTATE_ALLOWOPTS; 3915 if (r->rule_flag & PFRULE_STATESLOPPY) 3916 s->state_flags |= PFSTATE_SLOPPY; 3917 if (pd->not_cpu_localized) 3918 s->state_flags |= PFSTATE_STACK_GLOBAL; 3919 3920 s->log = r->log & PF_LOG_ALL; 3921 if (nr != NULL) 3922 s->log |= nr->log & PF_LOG_ALL; 3923 switch (pd->proto) { 3924 case IPPROTO_TCP: 3925 s->src.seqlo = ntohl(th->th_seq); 3926 s->src.seqhi = s->src.seqlo + pd->p_len + 1; 3927 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && 3928 r->keep_state == PF_STATE_MODULATE) { 3929 /* Generate sequence number modulator */ 3930 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 3931 0) 3932 s->src.seqdiff = 1; 3933 pf_change_a(&th->th_seq, &th->th_sum, 3934 htonl(s->src.seqlo + s->src.seqdiff), 0); 3935 *rewrite = 1; 3936 } else 3937 s->src.seqdiff = 0; 3938 if (th->th_flags & TH_SYN) { 3939 s->src.seqhi++; 3940 s->src.wscale = pf_get_wscale(m, off, 3941 th->th_off, pd->af); 3942 } 3943 s->src.max_win = MAX(ntohs(th->th_win), 1); 3944 if (s->src.wscale & PF_WSCALE_MASK) { 3945 /* Remove scale factor from initial window */ 3946 int win = s->src.max_win; 3947 win += 1 << (s->src.wscale & PF_WSCALE_MASK); 3948 s->src.max_win = (win - 1) >> 3949 (s->src.wscale & PF_WSCALE_MASK); 3950 } 3951 if (th->th_flags & TH_FIN) 3952 s->src.seqhi++; 3953 s->dst.seqhi = 1; 3954 s->dst.max_win = 1; 3955 s->src.state = TCPS_SYN_SENT; 3956 s->dst.state = TCPS_CLOSED; 3957 s->timeout = PFTM_TCP_FIRST_PACKET; 3958 break; 3959 case IPPROTO_UDP: 3960 s->src.state = PFUDPS_SINGLE; 3961 s->dst.state = PFUDPS_NO_TRAFFIC; 3962 s->timeout = PFTM_UDP_FIRST_PACKET; 3963 break; 3964 case IPPROTO_ICMP: 3965 #ifdef INET6 3966 case IPPROTO_ICMPV6: 3967 #endif 3968 s->timeout = PFTM_ICMP_FIRST_PACKET; 3969 break; 3970 default: 3971 s->src.state = PFOTHERS_SINGLE; 3972 s->dst.state = PFOTHERS_NO_TRAFFIC; 3973 s->timeout = PFTM_OTHER_FIRST_PACKET; 3974 } 3975 3976 s->creation = time_second; 3977 s->expire = time_second; 3978 3979 if (sn != NULL) { 3980 s->src_node = sn; 3981 s->src_node->states++; 3982 } 3983 if (nsn != NULL) { 3984 /* XXX We only modify one side for now. 
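		 * As in pf_get_translation() above, the translated
		 * side of the state key is slot 1, hence nk->addr[1]
		 * below.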
*/ 3985 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af); 3986 s->nat_src_node = nsn; 3987 s->nat_src_node->states++; 3988 } 3989 if (pd->proto == IPPROTO_TCP) { 3990 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m, 3991 off, pd, th, &s->src, &s->dst)) { 3992 REASON_SET(&reason, PFRES_MEMORY); 3993 pf_src_tree_remove_state(s); 3994 STATE_DEC_COUNTERS(s); 3995 kfree(s, M_PFSTATEPL); 3996 return (PF_DROP); 3997 } 3998 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && 3999 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s, 4000 &s->src, &s->dst, rewrite)) { 4001 /* This really shouldn't happen!!! */ 4002 DPFPRINTF(PF_DEBUG_URGENT, 4003 ("pf_normalize_tcp_stateful failed on first pkt")); 4004 pf_normalize_tcp_cleanup(s); 4005 pf_src_tree_remove_state(s); 4006 STATE_DEC_COUNTERS(s); 4007 kfree(s, M_PFSTATEPL); 4008 return (PF_DROP); 4009 } 4010 } 4011 s->direction = pd->dir; 4012 4013 if (sk == NULL && pf_state_key_setup(pd, nr, &skw, &sks, &sk, &nk, 4014 pd->src, pd->dst, sport, dport)) { 4015 REASON_SET(&reason, PFRES_MEMORY); 4016 goto csfailed; 4017 } 4018 4019 if (pf_state_insert(BOUND_IFACE(r, kif), skw, sks, s)) { 4020 if (pd->proto == IPPROTO_TCP) 4021 pf_normalize_tcp_cleanup(s); 4022 REASON_SET(&reason, PFRES_STATEINS); 4023 pf_src_tree_remove_state(s); 4024 STATE_DEC_COUNTERS(s); 4025 kfree(s, M_PFSTATEPL); 4026 return (PF_DROP); 4027 } else 4028 *sm = s; 4029 4030 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */ 4031 if (tag > 0) { 4032 pf_tag_ref(tag); 4033 s->tag = tag; 4034 } 4035 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == 4036 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { 4037 s->src.state = PF_TCPS_PROXY_SRC; 4038 /* undo NAT changes, if they have taken place */ 4039 if (nr != NULL) { 4040 struct pf_state_key *skt = s->key[PF_SK_WIRE]; 4041 if (pd->dir == PF_OUT) 4042 skt = s->key[PF_SK_STACK]; 4043 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); 4044 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); 4045 if (pd->sport) 4046 *pd->sport = skt->port[pd->sidx]; 4047 if (pd->dport) 4048 *pd->dport = skt->port[pd->didx]; 4049 if (pd->proto_sum) 4050 *pd->proto_sum = bproto_sum; 4051 if (pd->ip_sum) 4052 *pd->ip_sum = bip_sum; 4053 m_copyback(m, off, hdrlen, pd->hdr.any); 4054 } 4055 s->src.seqhi = htonl(karc4random()); 4056 /* Find mss option */ 4057 mss = pf_get_mss(m, off, th->th_off, pd->af); 4058 mss = pf_calc_mss(pd->src, pd->af, mss); 4059 mss = pf_calc_mss(pd->dst, pd->af, mss); 4060 s->src.mss = mss; 4061 s->state_flags &= ~PFSTATE_CREATEINPROG; 4062 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, 4063 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, 4064 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL); 4065 REASON_SET(&reason, PFRES_SYNPROXY); 4066 return (PF_SYNPROXY_DROP); 4067 } 4068 4069 s->state_flags &= ~PFSTATE_CREATEINPROG; 4070 return (PF_PASS); 4071 4072 csfailed: 4073 if (sk != NULL) 4074 kfree(sk, M_PFSTATEKEYPL); 4075 if (nk != NULL) 4076 kfree(nk, M_PFSTATEKEYPL); 4077 4078 if (sn != NULL && sn->states == 0 && sn->expire == 0) { 4079 RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], sn); 4080 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 4081 atomic_add_int(&pf_status.src_nodes, -1); 4082 kfree(sn, M_PFSRCTREEPL); 4083 } 4084 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) { 4085 RB_REMOVE(pf_src_tree, &tree_src_tracking[cpu], nsn); 4086 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 4087 atomic_add_int(&pf_status.src_nodes, -1); 4088 kfree(nsn, M_PFSRCTREEPL); 4089 } 4090 if (s) { 
4091 pf_src_tree_remove_state(s); 4092 STATE_DEC_COUNTERS(s); 4093 kfree(s, M_PFSTATEPL); 4094 } 4095 4096 return (PF_DROP); 4097 } 4098 4099 int 4100 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, 4101 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am, 4102 struct pf_ruleset **rsm) 4103 { 4104 struct pf_rule *r, *a = NULL; 4105 struct pf_ruleset *ruleset = NULL; 4106 sa_family_t af = pd->af; 4107 u_short reason; 4108 int tag = -1; 4109 int asd = 0; 4110 int match = 0; 4111 4112 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 4113 while (r != NULL) { 4114 r->evaluations++; 4115 if (pfi_kif_match(r->kif, kif) == r->ifnot) 4116 r = r->skip[PF_SKIP_IFP].ptr; 4117 else if (r->direction && r->direction != direction) 4118 r = r->skip[PF_SKIP_DIR].ptr; 4119 else if (r->af && r->af != af) 4120 r = r->skip[PF_SKIP_AF].ptr; 4121 else if (r->proto && r->proto != pd->proto) 4122 r = r->skip[PF_SKIP_PROTO].ptr; 4123 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 4124 r->src.neg, kif)) 4125 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 4126 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 4127 r->dst.neg, NULL)) 4128 r = r->skip[PF_SKIP_DST_ADDR].ptr; 4129 else if (r->tos && !(r->tos == pd->tos)) 4130 r = TAILQ_NEXT(r, entries); 4131 else if (r->os_fingerprint != PF_OSFP_ANY) 4132 r = TAILQ_NEXT(r, entries); 4133 else if (pd->proto == IPPROTO_UDP && 4134 (r->src.port_op || r->dst.port_op)) 4135 r = TAILQ_NEXT(r, entries); 4136 else if (pd->proto == IPPROTO_TCP && 4137 (r->src.port_op || r->dst.port_op || r->flagset)) 4138 r = TAILQ_NEXT(r, entries); 4139 else if ((pd->proto == IPPROTO_ICMP || 4140 pd->proto == IPPROTO_ICMPV6) && 4141 (r->type || r->code)) 4142 r = TAILQ_NEXT(r, entries); 4143 else if (r->prob && r->prob <= karc4random()) 4144 r = TAILQ_NEXT(r, entries); 4145 else if (r->match_tag && !pf_match_tag(m, r, &tag)) 4146 r = TAILQ_NEXT(r, entries); 4147 else { 4148 if (r->anchor == NULL) { 4149 match = 1; 4150 *rm = r; 4151 *am = a; 4152 *rsm = ruleset; 4153 if ((*rm)->quick) 4154 break; 4155 r = TAILQ_NEXT(r, entries); 4156 } else 4157 pf_step_into_anchor(&asd, &ruleset, 4158 PF_RULESET_FILTER, &r, &a, &match); 4159 } 4160 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, 4161 PF_RULESET_FILTER, &r, &a, &match)) 4162 break; 4163 } 4164 r = *rm; 4165 a = *am; 4166 ruleset = *rsm; 4167 4168 REASON_SET(&reason, PFRES_MATCH); 4169 4170 if (r->log) 4171 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset, 4172 pd); 4173 4174 if (r->action != PF_PASS) 4175 return (PF_DROP); 4176 4177 if (pf_tag_packet(m, tag, -1)) { 4178 REASON_SET(&reason, PFRES_MEMORY); 4179 return (PF_DROP); 4180 } 4181 4182 return (PF_PASS); 4183 } 4184 4185 /* 4186 * Called with state locked 4187 */ 4188 int 4189 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst, 4190 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off, 4191 struct pf_pdesc *pd, u_short *reason, int *copyback) 4192 { 4193 struct tcphdr *th = pd->hdr.tcp; 4194 u_int16_t win = ntohs(th->th_win); 4195 u_int32_t ack, end, seq, orig_seq; 4196 u_int8_t sws, dws; 4197 int ackskew; 4198 4199 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { 4200 sws = src->wscale & PF_WSCALE_MASK; 4201 dws = dst->wscale & PF_WSCALE_MASK; 4202 } else 4203 sws = dws = 0; 4204 4205 /* 4206 * Sequence tracking algorithm from Guido van Rooij's paper: 4207 * http://www.madison-gurkha.com/publications/tcp_filtering/ 4208 * tcp_filtering.ps 4209 */ 4210 4211 orig_seq = 
seq = ntohl(th->th_seq);
4212 	if (src->seqlo == 0) {
4213 		/* First packet from this end. Set its state */
4214
4215 		if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4216 		    src->scrub == NULL) {
4217 			if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4218 				REASON_SET(reason, PFRES_MEMORY);
4219 				return (PF_DROP);
4220 			}
4221 		}
4222
4223 		/* Deferred generation of sequence number modulator */
4224 		if (dst->seqdiff && !src->seqdiff) {
4225 			/* use random iss for the TCP server */
4226 			while ((src->seqdiff = karc4random() - seq) == 0)
4227 				;
4228 			ack = ntohl(th->th_ack) - dst->seqdiff;
4229 			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
4230 			    src->seqdiff), 0);
4231 			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
4232 			*copyback = 1;
4233 		} else {
4234 			ack = ntohl(th->th_ack);
4235 		}
4236
4237 		end = seq + pd->p_len;
4238 		if (th->th_flags & TH_SYN) {
4239 			end++;
4240 			(*state)->sync_flags |= PFSTATE_GOT_SYN2;
4241 			if (dst->wscale & PF_WSCALE_FLAG) {
4242 				src->wscale = pf_get_wscale(m, off, th->th_off,
4243 				    pd->af);
4244 				if (src->wscale & PF_WSCALE_FLAG) {
4245 					/* Remove scale factor from initial
4246 					 * window */
4247 					sws = src->wscale & PF_WSCALE_MASK;
4248 					win = ((u_int32_t)win + (1 << sws) - 1)
4249 					    >> sws;
4250 					dws = dst->wscale & PF_WSCALE_MASK;
4251 				} else {
4252 					/* fixup other window */
4253 					dst->max_win <<= dst->wscale &
4254 					    PF_WSCALE_MASK;
4255 					/* in case of a retrans SYN|ACK */
4256 					dst->wscale = 0;
4257 				}
4258 			}
4259 		}
4260 		if (th->th_flags & TH_FIN)
4261 			end++;
4262
4263 		src->seqlo = seq;
4264 		if (src->state < TCPS_SYN_SENT)
4265 			src->state = TCPS_SYN_SENT;
4266
4267 		/*
4268 		 * May need to slide the window (seqhi may have been set by
4269 		 * the crappy stack check or if we picked up the connection
4270 		 * after establishment)
4271 		 */
4272 		if (src->seqhi == 1 ||
4273 		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4274 			src->seqhi = end + MAX(1, dst->max_win << dws);
4275 		if (win > src->max_win)
4276 			src->max_win = win;
4277
4278 	} else {
4279 		ack = ntohl(th->th_ack) - dst->seqdiff;
4280 		if (src->seqdiff) {
4281 			/* Modulate sequence numbers */
4282 			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
4283 			    src->seqdiff), 0);
4284 			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
4285 			*copyback = 1;
4286 		}
4287 		end = seq + pd->p_len;
4288 		if (th->th_flags & TH_SYN)
4289 			end++;
4290 		if (th->th_flags & TH_FIN)
4291 			end++;
4292 	}
4293
4294 	if ((th->th_flags & TH_ACK) == 0) {
4295 		/* Let it pass through the ack skew check */
4296 		ack = dst->seqlo;
4297 	} else if ((ack == 0 &&
4298 	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4299 	    /* broken tcp stacks do not set ack */
4300 	    (dst->state < TCPS_SYN_SENT)) {
4301 		/*
4302 		 * Many stacks (ours included) will set the ACK number in a
4303 		 * FIN|ACK if the SYN times out -- no sequence to ACK.
4304 		 */
4305 		ack = dst->seqlo;
4306 	}
4307
4308 	if (seq == end) {
4309 		/* Ease sequencing restrictions on no data packets */
4310 		seq = src->seqlo;
4311 		end = seq;
4312 	}
4313
4314 	ackskew = dst->seqlo - ack;
4315
4316
4317 	/*
4318 	 * Need to demodulate the sequence numbers in any TCP SACK options
4319 	 * (Selective ACK). We could optionally validate the SACK values
4320 	 * against the current ACK window, either forwards or backwards, but
4321 	 * I'm not confident that SACK has been implemented properly
4322 	 * everywhere. It wouldn't surprise me if several stacks accidentally
4323 	 * SACK too far backwards of previously ACKed data.
There really aren't 4324 * any security implications of bad SACKing unless the target stack 4325 * doesn't validate the option length correctly. Someone trying to 4326 * spoof into a TCP connection won't bother blindly sending SACK 4327 * options anyway. 4328 */ 4329 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { 4330 if (pf_modulate_sack(m, off, pd, th, dst)) 4331 *copyback = 1; 4332 } 4333 4334 4335 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 4336 if (SEQ_GEQ(src->seqhi, end) && 4337 /* Last octet inside other's window space */ 4338 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && 4339 /* Retrans: not more than one window back */ 4340 (ackskew >= -MAXACKWINDOW) && 4341 /* Acking not more than one reassembled fragment backwards */ 4342 (ackskew <= (MAXACKWINDOW << sws)) && 4343 /* Acking not more than one window forward */ 4344 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || 4345 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || 4346 (pd->flags & PFDESC_IP_REAS) == 0)) { 4347 /* Require an exact/+1 sequence match on resets when possible */ 4348 4349 if (dst->scrub || src->scrub) { 4350 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 4351 *state, src, dst, copyback)) 4352 return (PF_DROP); 4353 } 4354 4355 /* update max window */ 4356 if (src->max_win < win) 4357 src->max_win = win; 4358 /* synchronize sequencing */ 4359 if (SEQ_GT(end, src->seqlo)) 4360 src->seqlo = end; 4361 /* slide the window of what the other end can send */ 4362 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 4363 dst->seqhi = ack + MAX((win << sws), 1); 4364 4365 4366 /* update states */ 4367 if (th->th_flags & TH_SYN) 4368 if (src->state < TCPS_SYN_SENT) 4369 src->state = TCPS_SYN_SENT; 4370 if (th->th_flags & TH_FIN) 4371 if (src->state < TCPS_CLOSING) 4372 src->state = TCPS_CLOSING; 4373 if (th->th_flags & TH_ACK) { 4374 if (dst->state == TCPS_SYN_SENT) { 4375 dst->state = TCPS_ESTABLISHED; 4376 if (src->state == TCPS_ESTABLISHED && 4377 (*state)->src_node != NULL && 4378 pf_src_connlimit(*state)) { 4379 REASON_SET(reason, PFRES_SRCLIMIT); 4380 return (PF_DROP); 4381 } 4382 } else if (dst->state == TCPS_CLOSING) 4383 dst->state = TCPS_FIN_WAIT_2; 4384 } 4385 if (th->th_flags & TH_RST) 4386 src->state = dst->state = TCPS_TIME_WAIT; 4387 4388 /* update expire time */ 4389 (*state)->expire = time_second; 4390 if (src->state >= TCPS_FIN_WAIT_2 && 4391 dst->state >= TCPS_FIN_WAIT_2) 4392 (*state)->timeout = PFTM_TCP_CLOSED; 4393 else if (src->state >= TCPS_CLOSING && 4394 dst->state >= TCPS_CLOSING) 4395 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4396 else if (src->state < TCPS_ESTABLISHED || 4397 dst->state < TCPS_ESTABLISHED) 4398 (*state)->timeout = PFTM_TCP_OPENING; 4399 else if (src->state >= TCPS_CLOSING || 4400 dst->state >= TCPS_CLOSING) 4401 (*state)->timeout = PFTM_TCP_CLOSING; 4402 else 4403 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4404 4405 /* Fall through to PASS packet */ 4406 4407 } else if ((dst->state < TCPS_SYN_SENT || 4408 dst->state >= TCPS_FIN_WAIT_2 || 4409 src->state >= TCPS_FIN_WAIT_2) && 4410 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 4411 /* Within a window forward of the originating packet */ 4412 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 4413 /* Within a window backward of the originating packet */ 4414 4415 /* 4416 * This currently handles three situations: 4417 * 1) Stupid stacks will shotgun SYNs before their peer 4418 * replies. 
4419 		 *  2) When PF catches an already established stream (the
4420 		 *     firewall rebooted, the state table was flushed, routes
4421 		 *     changed...)
4422 		 *  3) Packets get funky immediately after the connection
4423 		 *     closes (this should catch Solaris spurious ACK|FINs
4424 		 *     that web servers like to spew after a close)
4425 		 *
4426 		 * This must be a little more careful than the above code
4427 		 * since packet floods will also be caught here. We don't
4428 		 * update the TTL here to mitigate the damage of a packet
4429 		 * flood and so the same code can handle awkward establishment
4430 		 * and a loosened connection close.
4431 		 * In the establishment case, a correct peer response will
4432 		 * validate the connection, go through the normal state code
4433 		 * and keep updating the state TTL.
4434 		 */
4435
4436 		if (pf_status.debug >= PF_DEBUG_MISC) {
4437 			kprintf("pf: loose state match: ");
4438 			pf_print_state(*state);
4439 			pf_print_flags(th->th_flags);
4440 			kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4441 			    "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, pd->p_len,
4442 			    ackskew, (unsigned long long)(*state)->packets[0],
4443 			    (unsigned long long)(*state)->packets[1],
4444 			    pd->dir == PF_IN ? "in" : "out",
4445 			    pd->dir == (*state)->direction ? "fwd" : "rev");
4446 		}
4447
4448 		if (dst->scrub || src->scrub) {
4449 			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4450 			    *state, src, dst, copyback))
4451 				return (PF_DROP);
4452 		}
4453
4454 		/* update max window */
4455 		if (src->max_win < win)
4456 			src->max_win = win;
4457 		/* synchronize sequencing */
4458 		if (SEQ_GT(end, src->seqlo))
4459 			src->seqlo = end;
4460 		/* slide the window of what the other end can send */
4461 		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4462 			dst->seqhi = ack + MAX((win << sws), 1);
4463
4464 		/*
4465 		 * Cannot set dst->seqhi here since this could be a shotgunned
4466 		 * SYN and not an already established connection.
4467 		 */
4468
4469 		if (th->th_flags & TH_FIN)
4470 			if (src->state < TCPS_CLOSING)
4471 				src->state = TCPS_CLOSING;
4472 		if (th->th_flags & TH_RST)
4473 			src->state = dst->state = TCPS_TIME_WAIT;
4474
4475 		/* Fall through to PASS packet */
4476
4477 	} else if ((*state)->pickup_mode == PF_PICKUPS_HASHONLY ||
4478 	    ((*state)->pickup_mode == PF_PICKUPS_ENABLED &&
4479 	    ((*state)->sync_flags & PFSTATE_GOT_SYN_MASK) !=
4480 	    PFSTATE_GOT_SYN_MASK)) {
4481 		/*
4482 		 * If pickup mode is hash only, do not fail on sequence checks.
4483 		 *
4484 		 * If pickup mode is enabled and we did not see the SYN in
4485 		 * both directions, do not fail on sequence checks because
4486 		 * we do not have complete information on window scale.
4487 		 *
4488 		 * Adjust expiration and fall through to PASS packet.
4489 		 * XXX Add a FIN check to reduce timeout?
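		 *
		 * Note: without both SYNs the negotiated window scale is
		 * unknown; e.g. a peer that sent win=512 with wscale=7
		 * really offers 512 << 7 = 65536 bytes, so strict
		 * seqhi/max_win checks could be off by up to a factor of
		 * 2^14 and would drop valid traffic.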
4490 		 */
4491 		(*state)->expire = time_second;
4492 	} else {
4493 		/*
4494 		 * Failure processing
4495 		 */
4496 		if ((*state)->dst.state == TCPS_SYN_SENT &&
4497 		    (*state)->src.state == TCPS_SYN_SENT) {
4498 			/* Send RST for state mismatches during handshake */
4499 			if (!(th->th_flags & TH_RST))
4500 				pf_send_tcp((*state)->rule.ptr, pd->af,
4501 				    pd->dst, pd->src, th->th_dport,
4502 				    th->th_sport, ntohl(th->th_ack), 0,
4503 				    TH_RST, 0, 0,
4504 				    (*state)->rule.ptr->return_ttl, 1, 0,
4505 				    pd->eh, kif->pfik_ifp);
4506 			src->seqlo = 0;
4507 			src->seqhi = 1;
4508 			src->max_win = 1;
4509 		} else if (pf_status.debug >= PF_DEBUG_MISC) {
4510 			kprintf("pf: BAD state: ");
4511 			pf_print_state(*state);
4512 			pf_print_flags(th->th_flags);
4513 			kprintf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4514 			    "pkts=%llu:%llu dir=%s,%s\n",
4515 			    seq, orig_seq, ack, pd->p_len, ackskew,
4516 			    (unsigned long long)(*state)->packets[0],
4517 			    (unsigned long long)(*state)->packets[1],
4518 			    pd->dir == PF_IN ? "in" : "out",
4519 			    pd->dir == (*state)->direction ? "fwd" : "rev");
4520 			kprintf("pf: State failure on: %c %c %c %c | %c %c\n",
4521 			    SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4522 			    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4523 			    ' ': '2',
4524 			    (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4525 			    (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4526 			    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4527 			    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4528 		}
4529 		REASON_SET(reason, PFRES_BADSTATE);
4530 		return (PF_DROP);
4531 	}
4532
4533 	return (PF_PASS);
4534 }
4535
4536 /*
4537  * Called with state locked
4538  */
4539 int
4540 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4541     struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4542 {
4543 	struct tcphdr *th = pd->hdr.tcp;
4544
4545 	if (th->th_flags & TH_SYN)
4546 		if (src->state < TCPS_SYN_SENT)
4547 			src->state = TCPS_SYN_SENT;
4548 	if (th->th_flags & TH_FIN)
4549 		if (src->state < TCPS_CLOSING)
4550 			src->state = TCPS_CLOSING;
4551 	if (th->th_flags & TH_ACK) {
4552 		if (dst->state == TCPS_SYN_SENT) {
4553 			dst->state = TCPS_ESTABLISHED;
4554 			if (src->state == TCPS_ESTABLISHED &&
4555 			    (*state)->src_node != NULL &&
4556 			    pf_src_connlimit(*state)) {
4557 				REASON_SET(reason, PFRES_SRCLIMIT);
4558 				return (PF_DROP);
4559 			}
4560 		} else if (dst->state == TCPS_CLOSING) {
4561 			dst->state = TCPS_FIN_WAIT_2;
4562 		} else if (src->state == TCPS_SYN_SENT &&
4563 		    dst->state < TCPS_SYN_SENT) {
4564 			/*
4565 			 * Handle a special sloppy case where we only see one
4566 			 * half of the connection. If there is an ACK after
4567 			 * the initial SYN without ever seeing a packet from
4568 			 * the destination, set the connection to established.
4569 			 */
4570 			dst->state = src->state = TCPS_ESTABLISHED;
4571 			if ((*state)->src_node != NULL &&
4572 			    pf_src_connlimit(*state)) {
4573 				REASON_SET(reason, PFRES_SRCLIMIT);
4574 				return (PF_DROP);
4575 			}
4576 		} else if (src->state == TCPS_CLOSING &&
4577 		    dst->state == TCPS_ESTABLISHED &&
4578 		    dst->seqlo == 0) {
4579 			/*
4580 			 * Handle the closing of half connections where we
4581 			 * don't see the full bidirectional FIN/ACK+ACK
4582 			 * handshake.
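			 * dst->seqlo == 0 serves as the "no packet seen from
			 * this peer yet" marker, the same convention the full
			 * tracker uses to detect the first packet from an
			 * endpoint.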
4583 */ 4584 dst->state = TCPS_CLOSING; 4585 } 4586 } 4587 if (th->th_flags & TH_RST) 4588 src->state = dst->state = TCPS_TIME_WAIT; 4589 4590 /* update expire time */ 4591 (*state)->expire = time_second; 4592 if (src->state >= TCPS_FIN_WAIT_2 && 4593 dst->state >= TCPS_FIN_WAIT_2) 4594 (*state)->timeout = PFTM_TCP_CLOSED; 4595 else if (src->state >= TCPS_CLOSING && 4596 dst->state >= TCPS_CLOSING) 4597 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4598 else if (src->state < TCPS_ESTABLISHED || 4599 dst->state < TCPS_ESTABLISHED) 4600 (*state)->timeout = PFTM_TCP_OPENING; 4601 else if (src->state >= TCPS_CLOSING || 4602 dst->state >= TCPS_CLOSING) 4603 (*state)->timeout = PFTM_TCP_CLOSING; 4604 else 4605 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4606 4607 return (PF_PASS); 4608 } 4609 4610 /* 4611 * Test TCP connection state. Caller must hold the state locked. 4612 */ 4613 int 4614 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, 4615 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4616 u_short *reason) 4617 { 4618 struct pf_state_key_cmp key; 4619 struct tcphdr *th = pd->hdr.tcp; 4620 int copyback = 0; 4621 int error; 4622 struct pf_state_peer *src, *dst; 4623 struct pf_state_key *sk; 4624 4625 key.af = pd->af; 4626 key.proto = IPPROTO_TCP; 4627 if (direction == PF_IN) { /* wire side, straight */ 4628 PF_ACPY(&key.addr[0], pd->src, key.af); 4629 PF_ACPY(&key.addr[1], pd->dst, key.af); 4630 key.port[0] = th->th_sport; 4631 key.port[1] = th->th_dport; 4632 } else { /* stack side, reverse */ 4633 PF_ACPY(&key.addr[1], pd->src, key.af); 4634 PF_ACPY(&key.addr[0], pd->dst, key.af); 4635 key.port[1] = th->th_sport; 4636 key.port[0] = th->th_dport; 4637 } 4638 4639 STATE_LOOKUP(kif, &key, direction, *state, m); 4640 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 4641 4642 if (direction == (*state)->direction) { 4643 src = &(*state)->src; 4644 dst = &(*state)->dst; 4645 } else { 4646 src = &(*state)->dst; 4647 dst = &(*state)->src; 4648 } 4649 4650 sk = (*state)->key[pd->didx]; 4651 4652 if ((*state)->src.state == PF_TCPS_PROXY_SRC) { 4653 if (direction != (*state)->direction) { 4654 REASON_SET(reason, PFRES_SYNPROXY); 4655 FAIL (PF_SYNPROXY_DROP); 4656 } 4657 if (th->th_flags & TH_SYN) { 4658 if (ntohl(th->th_seq) != (*state)->src.seqlo) { 4659 REASON_SET(reason, PFRES_SYNPROXY); 4660 FAIL (PF_DROP); 4661 } 4662 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, 4663 pd->src, th->th_dport, th->th_sport, 4664 (*state)->src.seqhi, ntohl(th->th_seq) + 1, 4665 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 4666 0, NULL, NULL); 4667 REASON_SET(reason, PFRES_SYNPROXY); 4668 FAIL (PF_SYNPROXY_DROP); 4669 } else if (!(th->th_flags & TH_ACK) || 4670 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4671 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4672 REASON_SET(reason, PFRES_SYNPROXY); 4673 FAIL (PF_DROP); 4674 } else if ((*state)->src_node != NULL && 4675 pf_src_connlimit(*state)) { 4676 REASON_SET(reason, PFRES_SRCLIMIT); 4677 FAIL (PF_DROP); 4678 } else 4679 (*state)->src.state = PF_TCPS_PROXY_DST; 4680 } 4681 if ((*state)->src.state == PF_TCPS_PROXY_DST) { 4682 if (direction == (*state)->direction) { 4683 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || 4684 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4685 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4686 REASON_SET(reason, PFRES_SYNPROXY); 4687 FAIL (PF_DROP); 4688 } 4689 (*state)->src.max_win = MAX(ntohs(th->th_win), 1); 4690 if ((*state)->dst.seqhi == 1) 4691 (*state)->dst.seqhi = htonl(karc4random()); 4692 
pf_send_tcp((*state)->rule.ptr, pd->af,
4693 			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
4694 			    sk->port[pd->sidx], sk->port[pd->didx],
4695 			    (*state)->dst.seqhi, 0, TH_SYN, 0,
4696 			    (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
4697 			REASON_SET(reason, PFRES_SYNPROXY);
4698 			FAIL (PF_SYNPROXY_DROP);
4699 		} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4700 		    (TH_SYN|TH_ACK)) ||
4701 		    (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4702 			REASON_SET(reason, PFRES_SYNPROXY);
4703 			FAIL (PF_DROP);
4704 		} else {
4705 			(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4706 			(*state)->dst.seqlo = ntohl(th->th_seq);
4707 			pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
4708 			    pd->src, th->th_dport, th->th_sport,
4709 			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4710 			    TH_ACK, (*state)->src.max_win, 0, 0, 0,
4711 			    (*state)->tag, NULL, NULL);
4712 			pf_send_tcp((*state)->rule.ptr, pd->af,
4713 			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
4714 			    sk->port[pd->sidx], sk->port[pd->didx],
4715 			    (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4716 			    TH_ACK, (*state)->dst.max_win, 0, 0, 1,
4717 			    0, NULL, NULL);
4718 			(*state)->src.seqdiff = (*state)->dst.seqhi -
4719 			    (*state)->src.seqlo;
4720 			(*state)->dst.seqdiff = (*state)->src.seqhi -
4721 			    (*state)->dst.seqlo;
4722 			(*state)->src.seqhi = (*state)->src.seqlo +
4723 			    (*state)->dst.max_win;
4724 			(*state)->dst.seqhi = (*state)->dst.seqlo +
4725 			    (*state)->src.max_win;
4726 			(*state)->src.wscale = (*state)->dst.wscale = 0;
4727 			(*state)->src.state = (*state)->dst.state =
4728 			    TCPS_ESTABLISHED;
4729 			REASON_SET(reason, PFRES_SYNPROXY);
4730 			FAIL (PF_SYNPROXY_DROP);
4731 		}
4732 	}
4733
4734 	/*
4735 	 * Check for connection (addr+port pair) reuse. We can't actually
4736 	 * unlink the state if we don't own it.
4737 	 */
4738 	if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4739 	    dst->state >= TCPS_FIN_WAIT_2 &&
4740 	    src->state >= TCPS_FIN_WAIT_2) {
4741 		if (pf_status.debug >= PF_DEBUG_MISC) {
4742 			kprintf("pf: state reuse ");
4743 			pf_print_state(*state);
4744 			pf_print_flags(th->th_flags);
4745 			kprintf("\n");
4746 		}
4747 		/* XXX make sure it's the same direction ?? */
4748 		(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4749 		if ((*state)->cpuid == mycpu->gd_cpuid) {
4750 			pf_unlink_state(*state);
4751 			*state = NULL;
4752 		} else {
4753 			(*state)->timeout = PFTM_PURGE;
4754 		}
4755 		FAIL (PF_DROP);
4756 	}
4757
4758 	if ((*state)->state_flags & PFSTATE_SLOPPY) {
4759 		if (pf_tcp_track_sloppy(src, dst, state, pd,
4760 		    reason) == PF_DROP) {
4761 			FAIL (PF_DROP);
4762 		}
4763 	} else {
4764 		if (pf_tcp_track_full(src, dst, state, kif, m, off, pd,
4765 		    reason, &copyback) == PF_DROP) {
4766 			FAIL (PF_DROP);
4767 		}
4768 	}
4769
4770 	/* translate source/destination address, if necessary */
4771 	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4772 		struct pf_state_key *nk = (*state)->key[pd->didx];
4773
4774 		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4775 		    nk->port[pd->sidx] != th->th_sport) {
4776 			/*
4777 			 * The translated source address may be completely
4778 			 * unrelated to the saved link header, make sure
4779 			 * a bridge doesn't try to use it.
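			 * Clearing BRIDGE_MBUF_TAGGED below discards the
			 * cached link header, and clearing M_HASH forces a
			 * rehash so the packet is redispatched to the cpu
			 * that owns the translated flow.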
4780 */ 4781 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4782 m->m_flags &= ~M_HASH; 4783 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 4784 &th->th_sum, &nk->addr[pd->sidx], 4785 nk->port[pd->sidx], 0, pd->af); 4786 } 4787 4788 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4789 nk->port[pd->didx] != th->th_dport) { 4790 /* 4791 * If we don't redispatch the packet will go into 4792 * the protocol stack on the wrong cpu for the 4793 * post-translated address. 4794 */ 4795 m->m_flags &= ~M_HASH; 4796 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 4797 &th->th_sum, &nk->addr[pd->didx], 4798 nk->port[pd->didx], 0, pd->af); 4799 } 4800 copyback = 1; 4801 } 4802 4803 /* Copyback sequence modulation or stateful scrub changes if needed */ 4804 if (copyback) 4805 m_copyback(m, off, sizeof(*th), (caddr_t)th); 4806 4807 pfsync_update_state(*state); 4808 error = PF_PASS; 4809 done: 4810 if (*state) 4811 lockmgr(&(*state)->lk, LK_RELEASE); 4812 return (error); 4813 } 4814 4815 /* 4816 * Test UDP connection state. Caller must hold the state locked. 4817 */ 4818 int 4819 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, 4820 struct mbuf *m, int off, void *h, struct pf_pdesc *pd) 4821 { 4822 struct pf_state_peer *src, *dst; 4823 struct pf_state_key_cmp key; 4824 struct udphdr *uh = pd->hdr.udp; 4825 4826 key.af = pd->af; 4827 key.proto = IPPROTO_UDP; 4828 if (direction == PF_IN) { /* wire side, straight */ 4829 PF_ACPY(&key.addr[0], pd->src, key.af); 4830 PF_ACPY(&key.addr[1], pd->dst, key.af); 4831 key.port[0] = uh->uh_sport; 4832 key.port[1] = uh->uh_dport; 4833 } else { /* stack side, reverse */ 4834 PF_ACPY(&key.addr[1], pd->src, key.af); 4835 PF_ACPY(&key.addr[0], pd->dst, key.af); 4836 key.port[1] = uh->uh_sport; 4837 key.port[0] = uh->uh_dport; 4838 } 4839 4840 STATE_LOOKUP(kif, &key, direction, *state, m); 4841 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 4842 4843 if (direction == (*state)->direction) { 4844 src = &(*state)->src; 4845 dst = &(*state)->dst; 4846 } else { 4847 src = &(*state)->dst; 4848 dst = &(*state)->src; 4849 } 4850 4851 /* update states */ 4852 if (src->state < PFUDPS_SINGLE) 4853 src->state = PFUDPS_SINGLE; 4854 if (dst->state == PFUDPS_SINGLE) 4855 dst->state = PFUDPS_MULTIPLE; 4856 4857 /* update expire time */ 4858 (*state)->expire = time_second; 4859 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) 4860 (*state)->timeout = PFTM_UDP_MULTIPLE; 4861 else 4862 (*state)->timeout = PFTM_UDP_SINGLE; 4863 4864 /* translate source/destination address, if necessary */ 4865 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4866 struct pf_state_key *nk = (*state)->key[pd->didx]; 4867 4868 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4869 nk->port[pd->sidx] != uh->uh_sport) { 4870 /* 4871 * The translated source address may be completely 4872 * unrelated to the saved link header, make sure 4873 * a bridge doesn't try to use it. 4874 */ 4875 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED; 4876 m->m_flags &= ~M_HASH; 4877 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 4878 &uh->uh_sum, &nk->addr[pd->sidx], 4879 nk->port[pd->sidx], 1, pd->af); 4880 } 4881 4882 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4883 nk->port[pd->didx] != uh->uh_dport) { 4884 /* 4885 * If we don't redispatch the packet will go into 4886 * the protocol stack on the wrong cpu for the 4887 * post-translated address. 
4888 */ 4889 m->m_flags &= ~M_HASH; 4890 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 4891 &uh->uh_sum, &nk->addr[pd->didx], 4892 nk->port[pd->didx], 1, pd->af); 4893 } 4894 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 4895 } 4896 4897 pfsync_update_state(*state); 4898 lockmgr(&(*state)->lk, LK_RELEASE); 4899 return (PF_PASS); 4900 } 4901 4902 /* 4903 * Test ICMP connection state. Caller must hold the state locked. 4904 */ 4905 int 4906 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, 4907 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4908 u_short *reason) 4909 { 4910 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 4911 u_int16_t icmpid = 0, *icmpsum; 4912 u_int8_t icmptype; 4913 int state_icmp = 0; 4914 int error; 4915 struct pf_state_key_cmp key; 4916 4917 switch (pd->proto) { 4918 #ifdef INET 4919 case IPPROTO_ICMP: 4920 icmptype = pd->hdr.icmp->icmp_type; 4921 icmpid = pd->hdr.icmp->icmp_id; 4922 icmpsum = &pd->hdr.icmp->icmp_cksum; 4923 4924 if (icmptype == ICMP_UNREACH || 4925 icmptype == ICMP_SOURCEQUENCH || 4926 icmptype == ICMP_REDIRECT || 4927 icmptype == ICMP_TIMXCEED || 4928 icmptype == ICMP_PARAMPROB) 4929 state_icmp++; 4930 break; 4931 #endif /* INET */ 4932 #ifdef INET6 4933 case IPPROTO_ICMPV6: 4934 icmptype = pd->hdr.icmp6->icmp6_type; 4935 icmpid = pd->hdr.icmp6->icmp6_id; 4936 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 4937 4938 if (icmptype == ICMP6_DST_UNREACH || 4939 icmptype == ICMP6_PACKET_TOO_BIG || 4940 icmptype == ICMP6_TIME_EXCEEDED || 4941 icmptype == ICMP6_PARAM_PROB) 4942 state_icmp++; 4943 break; 4944 #endif /* INET6 */ 4945 } 4946 4947 if (!state_icmp) { 4948 4949 /* 4950 * ICMP query/reply message not related to a TCP/UDP packet. 4951 * Search for an ICMP state. 4952 */ 4953 key.af = pd->af; 4954 key.proto = pd->proto; 4955 key.port[0] = key.port[1] = icmpid; 4956 if (direction == PF_IN) { /* wire side, straight */ 4957 PF_ACPY(&key.addr[0], pd->src, key.af); 4958 PF_ACPY(&key.addr[1], pd->dst, key.af); 4959 } else { /* stack side, reverse */ 4960 PF_ACPY(&key.addr[1], pd->src, key.af); 4961 PF_ACPY(&key.addr[0], pd->dst, key.af); 4962 } 4963 4964 STATE_LOOKUP(kif, &key, direction, *state, m); 4965 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 4966 4967 (*state)->expire = time_second; 4968 (*state)->timeout = PFTM_ICMP_ERROR_REPLY; 4969 4970 /* translate source/destination address, if necessary */ 4971 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4972 struct pf_state_key *nk = (*state)->key[pd->didx]; 4973 4974 switch (pd->af) { 4975 #ifdef INET 4976 case AF_INET: 4977 if (PF_ANEQ(pd->src, 4978 &nk->addr[pd->sidx], AF_INET)) 4979 pf_change_a(&saddr->v4.s_addr, 4980 pd->ip_sum, 4981 nk->addr[pd->sidx].v4.s_addr, 0); 4982 4983 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], 4984 AF_INET)) 4985 pf_change_a(&daddr->v4.s_addr, 4986 pd->ip_sum, 4987 nk->addr[pd->didx].v4.s_addr, 0); 4988 4989 if (nk->port[0] != 4990 pd->hdr.icmp->icmp_id) { 4991 pd->hdr.icmp->icmp_cksum = 4992 pf_cksum_fixup( 4993 pd->hdr.icmp->icmp_cksum, icmpid, 4994 nk->port[pd->sidx], 0); 4995 pd->hdr.icmp->icmp_id = 4996 nk->port[pd->sidx]; 4997 } 4998 4999 m_copyback(m, off, ICMP_MINLEN, 5000 (caddr_t)pd->hdr.icmp); 5001 break; 5002 #endif /* INET */ 5003 #ifdef INET6 5004 case AF_INET6: 5005 if (PF_ANEQ(pd->src, 5006 &nk->addr[pd->sidx], AF_INET6)) 5007 pf_change_a6(saddr, 5008 &pd->hdr.icmp6->icmp6_cksum, 5009 &nk->addr[pd->sidx], 0); 5010 5011 if (PF_ANEQ(pd->dst, 5012 &nk->addr[pd->didx], AF_INET6)) 5013 pf_change_a6(daddr, 5014 
&pd->hdr.icmp6->icmp6_cksum, 5015 &nk->addr[pd->didx], 0); 5016 5017 m_copyback(m, off, 5018 sizeof(struct icmp6_hdr), 5019 (caddr_t)pd->hdr.icmp6); 5020 break; 5021 #endif /* INET6 */ 5022 } 5023 } 5024 } else { 5025 /* 5026 * ICMP error message in response to a TCP/UDP packet. 5027 * Extract the inner TCP/UDP header and search for that state. 5028 */ 5029 5030 struct pf_pdesc pd2; 5031 #ifdef INET 5032 struct ip h2; 5033 #endif /* INET */ 5034 #ifdef INET6 5035 struct ip6_hdr h2_6; 5036 int terminal = 0; 5037 #endif /* INET6 */ 5038 int ipoff2; 5039 int off2; 5040 5041 pd2.not_cpu_localized = 1; 5042 pd2.af = pd->af; 5043 /* Payload packet is from the opposite direction. */ 5044 pd2.sidx = (direction == PF_IN) ? 1 : 0; 5045 pd2.didx = (direction == PF_IN) ? 0 : 1; 5046 switch (pd->af) { 5047 #ifdef INET 5048 case AF_INET: 5049 /* offset of h2 in mbuf chain */ 5050 ipoff2 = off + ICMP_MINLEN; 5051 5052 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 5053 NULL, reason, pd2.af)) { 5054 DPFPRINTF(PF_DEBUG_MISC, 5055 ("pf: ICMP error message too short " 5056 "(ip)\n")); 5057 FAIL (PF_DROP); 5058 } 5059 /* 5060 * ICMP error messages don't refer to non-first 5061 * fragments 5062 */ 5063 if (h2.ip_off & htons(IP_OFFMASK)) { 5064 REASON_SET(reason, PFRES_FRAG); 5065 FAIL (PF_DROP); 5066 } 5067 5068 /* offset of protocol header that follows h2 */ 5069 off2 = ipoff2 + (h2.ip_hl << 2); 5070 5071 pd2.proto = h2.ip_p; 5072 pd2.src = (struct pf_addr *)&h2.ip_src; 5073 pd2.dst = (struct pf_addr *)&h2.ip_dst; 5074 pd2.ip_sum = &h2.ip_sum; 5075 break; 5076 #endif /* INET */ 5077 #ifdef INET6 5078 case AF_INET6: 5079 ipoff2 = off + sizeof(struct icmp6_hdr); 5080 5081 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 5082 NULL, reason, pd2.af)) { 5083 DPFPRINTF(PF_DEBUG_MISC, 5084 ("pf: ICMP error message too short " 5085 "(ip6)\n")); 5086 FAIL (PF_DROP); 5087 } 5088 pd2.proto = h2_6.ip6_nxt; 5089 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 5090 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 5091 pd2.ip_sum = NULL; 5092 off2 = ipoff2 + sizeof(h2_6); 5093 do { 5094 switch (pd2.proto) { 5095 case IPPROTO_FRAGMENT: 5096 /* 5097 * ICMPv6 error messages for 5098 * non-first fragments 5099 */ 5100 REASON_SET(reason, PFRES_FRAG); 5101 FAIL (PF_DROP); 5102 case IPPROTO_AH: 5103 case IPPROTO_HOPOPTS: 5104 case IPPROTO_ROUTING: 5105 case IPPROTO_DSTOPTS: { 5106 /* get next header and header length */ 5107 struct ip6_ext opt6; 5108 5109 if (!pf_pull_hdr(m, off2, &opt6, 5110 sizeof(opt6), NULL, reason, 5111 pd2.af)) { 5112 DPFPRINTF(PF_DEBUG_MISC, 5113 ("pf: ICMPv6 short opt\n")); 5114 FAIL (PF_DROP); 5115 } 5116 if (pd2.proto == IPPROTO_AH) 5117 off2 += (opt6.ip6e_len + 2) * 4; 5118 else 5119 off2 += (opt6.ip6e_len + 1) * 8; 5120 pd2.proto = opt6.ip6e_nxt; 5121 /* goto the next header */ 5122 break; 5123 } 5124 default: 5125 terminal++; 5126 break; 5127 } 5128 } while (!terminal); 5129 break; 5130 #endif /* INET6 */ 5131 default: 5132 DPFPRINTF(PF_DEBUG_MISC, 5133 ("pf: ICMP AF %d unknown (ip6)\n", pd->af)); 5134 FAIL (PF_DROP); 5135 break; 5136 } 5137 5138 switch (pd2.proto) { 5139 case IPPROTO_TCP: { 5140 struct tcphdr th; 5141 u_int32_t seq; 5142 struct pf_state_peer *src, *dst; 5143 u_int8_t dws; 5144 int copyback = 0; 5145 5146 /* 5147 * Only the first 8 bytes of the TCP header can be 5148 * expected. Don't access any TCP header fields after 5149 * th_seq, an ackskew test is not possible. 
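			 * (RFC 792 only guarantees the IP header plus the
			 * first 8 data octets of the offending datagram,
			 * enough for the port pair and th_seq but nothing
			 * beyond.)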
5150 */ 5151 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, 5152 pd2.af)) { 5153 DPFPRINTF(PF_DEBUG_MISC, 5154 ("pf: ICMP error message too short " 5155 "(tcp)\n")); 5156 FAIL (PF_DROP); 5157 } 5158 5159 key.af = pd2.af; 5160 key.proto = IPPROTO_TCP; 5161 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5162 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5163 key.port[pd2.sidx] = th.th_sport; 5164 key.port[pd2.didx] = th.th_dport; 5165 5166 STATE_LOOKUP(kif, &key, direction, *state, m); 5167 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5168 5169 if (direction == (*state)->direction) { 5170 src = &(*state)->dst; 5171 dst = &(*state)->src; 5172 } else { 5173 src = &(*state)->src; 5174 dst = &(*state)->dst; 5175 } 5176 5177 if (src->wscale && dst->wscale) 5178 dws = dst->wscale & PF_WSCALE_MASK; 5179 else 5180 dws = 0; 5181 5182 /* Demodulate sequence number */ 5183 seq = ntohl(th.th_seq) - src->seqdiff; 5184 if (src->seqdiff) { 5185 pf_change_a(&th.th_seq, icmpsum, 5186 htonl(seq), 0); 5187 copyback = 1; 5188 } 5189 5190 if (!((*state)->state_flags & PFSTATE_SLOPPY) && 5191 (!SEQ_GEQ(src->seqhi, seq) || 5192 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { 5193 if (pf_status.debug >= PF_DEBUG_MISC) { 5194 kprintf("pf: BAD ICMP %d:%d ", 5195 icmptype, pd->hdr.icmp->icmp_code); 5196 pf_print_host(pd->src, 0, pd->af); 5197 kprintf(" -> "); 5198 pf_print_host(pd->dst, 0, pd->af); 5199 kprintf(" state: "); 5200 pf_print_state(*state); 5201 kprintf(" seq=%u\n", seq); 5202 } 5203 REASON_SET(reason, PFRES_BADSTATE); 5204 FAIL (PF_DROP); 5205 } else { 5206 if (pf_status.debug >= PF_DEBUG_MISC) { 5207 kprintf("pf: OK ICMP %d:%d ", 5208 icmptype, pd->hdr.icmp->icmp_code); 5209 pf_print_host(pd->src, 0, pd->af); 5210 kprintf(" -> "); 5211 pf_print_host(pd->dst, 0, pd->af); 5212 kprintf(" state: "); 5213 pf_print_state(*state); 5214 kprintf(" seq=%u\n", seq); 5215 } 5216 } 5217 5218 /* translate source/destination address, if necessary */ 5219 if ((*state)->key[PF_SK_WIRE] != 5220 (*state)->key[PF_SK_STACK]) { 5221 struct pf_state_key *nk = 5222 (*state)->key[pd->didx]; 5223 5224 if (PF_ANEQ(pd2.src, 5225 &nk->addr[pd2.sidx], pd2.af) || 5226 nk->port[pd2.sidx] != th.th_sport) 5227 pf_change_icmp(pd2.src, &th.th_sport, 5228 daddr, &nk->addr[pd2.sidx], 5229 nk->port[pd2.sidx], NULL, 5230 pd2.ip_sum, icmpsum, 5231 pd->ip_sum, 0, pd2.af); 5232 5233 if (PF_ANEQ(pd2.dst, 5234 &nk->addr[pd2.didx], pd2.af) || 5235 nk->port[pd2.didx] != th.th_dport) 5236 pf_change_icmp(pd2.dst, &th.th_dport, 5237 NULL, /* XXX Inbound NAT? 
*/ 5238 &nk->addr[pd2.didx], 5239 nk->port[pd2.didx], NULL, 5240 pd2.ip_sum, icmpsum, 5241 pd->ip_sum, 0, pd2.af); 5242 copyback = 1; 5243 } 5244 5245 if (copyback) { 5246 switch (pd2.af) { 5247 #ifdef INET 5248 case AF_INET: 5249 m_copyback(m, off, ICMP_MINLEN, 5250 (caddr_t)pd->hdr.icmp); 5251 m_copyback(m, ipoff2, sizeof(h2), 5252 (caddr_t)&h2); 5253 break; 5254 #endif /* INET */ 5255 #ifdef INET6 5256 case AF_INET6: 5257 m_copyback(m, off, 5258 sizeof(struct icmp6_hdr), 5259 (caddr_t)pd->hdr.icmp6); 5260 m_copyback(m, ipoff2, sizeof(h2_6), 5261 (caddr_t)&h2_6); 5262 break; 5263 #endif /* INET6 */ 5264 } 5265 m_copyback(m, off2, 8, (caddr_t)&th); 5266 } 5267 break; 5268 } 5269 case IPPROTO_UDP: { 5270 struct udphdr uh; 5271 5272 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 5273 NULL, reason, pd2.af)) { 5274 DPFPRINTF(PF_DEBUG_MISC, 5275 ("pf: ICMP error message too short " 5276 "(udp)\n")); 5277 return (PF_DROP); 5278 } 5279 5280 key.af = pd2.af; 5281 key.proto = IPPROTO_UDP; 5282 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5283 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5284 key.port[pd2.sidx] = uh.uh_sport; 5285 key.port[pd2.didx] = uh.uh_dport; 5286 5287 STATE_LOOKUP(kif, &key, direction, *state, m); 5288 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5289 5290 /* translate source/destination address, if necessary */ 5291 if ((*state)->key[PF_SK_WIRE] != 5292 (*state)->key[PF_SK_STACK]) { 5293 struct pf_state_key *nk = 5294 (*state)->key[pd->didx]; 5295 5296 if (PF_ANEQ(pd2.src, 5297 &nk->addr[pd2.sidx], pd2.af) || 5298 nk->port[pd2.sidx] != uh.uh_sport) 5299 pf_change_icmp(pd2.src, &uh.uh_sport, 5300 daddr, &nk->addr[pd2.sidx], 5301 nk->port[pd2.sidx], &uh.uh_sum, 5302 pd2.ip_sum, icmpsum, 5303 pd->ip_sum, 1, pd2.af); 5304 5305 if (PF_ANEQ(pd2.dst, 5306 &nk->addr[pd2.didx], pd2.af) || 5307 nk->port[pd2.didx] != uh.uh_dport) 5308 pf_change_icmp(pd2.dst, &uh.uh_dport, 5309 NULL, /* XXX Inbound NAT? 
*/ 5310 &nk->addr[pd2.didx], 5311 nk->port[pd2.didx], &uh.uh_sum, 5312 pd2.ip_sum, icmpsum, 5313 pd->ip_sum, 1, pd2.af); 5314 5315 switch (pd2.af) { 5316 #ifdef INET 5317 case AF_INET: 5318 m_copyback(m, off, ICMP_MINLEN, 5319 (caddr_t)pd->hdr.icmp); 5320 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5321 break; 5322 #endif /* INET */ 5323 #ifdef INET6 5324 case AF_INET6: 5325 m_copyback(m, off, 5326 sizeof(struct icmp6_hdr), 5327 (caddr_t)pd->hdr.icmp6); 5328 m_copyback(m, ipoff2, sizeof(h2_6), 5329 (caddr_t)&h2_6); 5330 break; 5331 #endif /* INET6 */ 5332 } 5333 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh); 5334 } 5335 break; 5336 } 5337 #ifdef INET 5338 case IPPROTO_ICMP: { 5339 struct icmp iih; 5340 5341 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 5342 NULL, reason, pd2.af)) { 5343 DPFPRINTF(PF_DEBUG_MISC, 5344 ("pf: ICMP error message too short i" 5345 "(icmp)\n")); 5346 return (PF_DROP); 5347 } 5348 5349 key.af = pd2.af; 5350 key.proto = IPPROTO_ICMP; 5351 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5352 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5353 key.port[0] = key.port[1] = iih.icmp_id; 5354 5355 STATE_LOOKUP(kif, &key, direction, *state, m); 5356 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5357 5358 /* translate source/destination address, if necessary */ 5359 if ((*state)->key[PF_SK_WIRE] != 5360 (*state)->key[PF_SK_STACK]) { 5361 struct pf_state_key *nk = 5362 (*state)->key[pd->didx]; 5363 5364 if (PF_ANEQ(pd2.src, 5365 &nk->addr[pd2.sidx], pd2.af) || 5366 nk->port[pd2.sidx] != iih.icmp_id) 5367 pf_change_icmp(pd2.src, &iih.icmp_id, 5368 daddr, &nk->addr[pd2.sidx], 5369 nk->port[pd2.sidx], NULL, 5370 pd2.ip_sum, icmpsum, 5371 pd->ip_sum, 0, AF_INET); 5372 5373 if (PF_ANEQ(pd2.dst, 5374 &nk->addr[pd2.didx], pd2.af) || 5375 nk->port[pd2.didx] != iih.icmp_id) 5376 pf_change_icmp(pd2.dst, &iih.icmp_id, 5377 NULL, /* XXX Inbound NAT? */ 5378 &nk->addr[pd2.didx], 5379 nk->port[pd2.didx], NULL, 5380 pd2.ip_sum, icmpsum, 5381 pd->ip_sum, 0, AF_INET); 5382 5383 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 5384 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5385 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih); 5386 } 5387 break; 5388 } 5389 #endif /* INET */ 5390 #ifdef INET6 5391 case IPPROTO_ICMPV6: { 5392 struct icmp6_hdr iih; 5393 5394 if (!pf_pull_hdr(m, off2, &iih, 5395 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { 5396 DPFPRINTF(PF_DEBUG_MISC, 5397 ("pf: ICMP error message too short " 5398 "(icmp6)\n")); 5399 FAIL (PF_DROP); 5400 } 5401 5402 key.af = pd2.af; 5403 key.proto = IPPROTO_ICMPV6; 5404 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5405 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5406 key.port[0] = key.port[1] = iih.icmp6_id; 5407 5408 STATE_LOOKUP(kif, &key, direction, *state, m); 5409 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5410 5411 /* translate source/destination address, if necessary */ 5412 if ((*state)->key[PF_SK_WIRE] != 5413 (*state)->key[PF_SK_STACK]) { 5414 struct pf_state_key *nk = 5415 (*state)->key[pd->didx]; 5416 5417 if (PF_ANEQ(pd2.src, 5418 &nk->addr[pd2.sidx], pd2.af) || 5419 nk->port[pd2.sidx] != iih.icmp6_id) 5420 pf_change_icmp(pd2.src, &iih.icmp6_id, 5421 daddr, &nk->addr[pd2.sidx], 5422 nk->port[pd2.sidx], NULL, 5423 pd2.ip_sum, icmpsum, 5424 pd->ip_sum, 0, AF_INET6); 5425 5426 if (PF_ANEQ(pd2.dst, 5427 &nk->addr[pd2.didx], pd2.af) || 5428 nk->port[pd2.didx] != iih.icmp6_id) 5429 pf_change_icmp(pd2.dst, &iih.icmp6_id, 5430 NULL, /* XXX Inbound NAT? 
*/ 5431 &nk->addr[pd2.didx], 5432 nk->port[pd2.didx], NULL, 5433 pd2.ip_sum, icmpsum, 5434 pd->ip_sum, 0, AF_INET6); 5435 5436 m_copyback(m, off, sizeof(struct icmp6_hdr), 5437 (caddr_t)pd->hdr.icmp6); 5438 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); 5439 m_copyback(m, off2, sizeof(struct icmp6_hdr), 5440 (caddr_t)&iih); 5441 } 5442 break; 5443 } 5444 #endif /* INET6 */ 5445 default: { 5446 key.af = pd2.af; 5447 key.proto = pd2.proto; 5448 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 5449 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 5450 key.port[0] = key.port[1] = 0; 5451 5452 STATE_LOOKUP(kif, &key, direction, *state, m); 5453 lockmgr(&(*state)->lk, LK_EXCLUSIVE); 5454 5455 /* translate source/destination address, if necessary */ 5456 if ((*state)->key[PF_SK_WIRE] != 5457 (*state)->key[PF_SK_STACK]) { 5458 struct pf_state_key *nk = 5459 (*state)->key[pd->didx]; 5460 5461 if (PF_ANEQ(pd2.src, 5462 &nk->addr[pd2.sidx], pd2.af)) 5463 pf_change_icmp(pd2.src, NULL, daddr, 5464 &nk->addr[pd2.sidx], 0, NULL, 5465 pd2.ip_sum, icmpsum, 5466 pd->ip_sum, 0, pd2.af); 5467 5468 if (PF_ANEQ(pd2.dst, 5469 &nk->addr[pd2.didx], pd2.af)) 5470 pf_change_icmp(pd2.src, NULL, 5471 NULL, /* XXX Inbound NAT? */ 5472 &nk->addr[pd2.didx], 0, NULL, 5473 pd2.ip_sum, icmpsum, 5474 pd->ip_sum, 0, pd2.af); 5475 5476 switch (pd2.af) { 5477 #ifdef INET 5478 case AF_INET: 5479 m_copyback(m, off, ICMP_MINLEN, 5480 (caddr_t)pd->hdr.icmp); 5481 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 5482 break; 5483 #endif /* INET */ 5484 #ifdef INET6 5485 case AF_INET6: 5486 m_copyback(m, off, 5487 sizeof(struct icmp6_hdr), 5488 (caddr_t)pd->hdr.icmp6); 5489 m_copyback(m, ipoff2, sizeof(h2_6), 5490 (caddr_t)&h2_6); 5491 break; 5492 #endif /* INET6 */ 5493 } 5494 } 5495 break; 5496 } 5497 } 5498 } 5499 5500 pfsync_update_state(*state); 5501 error = PF_PASS; 5502 done: 5503 if (*state) 5504 lockmgr(&(*state)->lk, LK_RELEASE); 5505 return (error); 5506 } 5507 5508 /* 5509 * Test other connection state. Caller must hold the state locked. 
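 * These protocols are tracked by address pair alone; both ports in the
 * state key are zero.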
5510  */
5511 int
5512 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5513     struct mbuf *m, struct pf_pdesc *pd)
5514 {
5515 	struct pf_state_peer *src, *dst;
5516 	struct pf_state_key_cmp key;
5517
5518 	key.af = pd->af;
5519 	key.proto = pd->proto;
5520 	if (direction == PF_IN) {
5521 		PF_ACPY(&key.addr[0], pd->src, key.af);
5522 		PF_ACPY(&key.addr[1], pd->dst, key.af);
5523 		key.port[0] = key.port[1] = 0;
5524 	} else {
5525 		PF_ACPY(&key.addr[1], pd->src, key.af);
5526 		PF_ACPY(&key.addr[0], pd->dst, key.af);
5527 		key.port[1] = key.port[0] = 0;
5528 	}
5529
5530 	STATE_LOOKUP(kif, &key, direction, *state, m);
5531 	lockmgr(&(*state)->lk, LK_EXCLUSIVE);
5532
5533 	if (direction == (*state)->direction) {
5534 		src = &(*state)->src;
5535 		dst = &(*state)->dst;
5536 	} else {
5537 		src = &(*state)->dst;
5538 		dst = &(*state)->src;
5539 	}
5540
5541 	/* update states */
5542 	if (src->state < PFOTHERS_SINGLE)
5543 		src->state = PFOTHERS_SINGLE;
5544 	if (dst->state == PFOTHERS_SINGLE)
5545 		dst->state = PFOTHERS_MULTIPLE;
5546
5547 	/* update expire time */
5548 	(*state)->expire = time_second;
5549 	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5550 		(*state)->timeout = PFTM_OTHER_MULTIPLE;
5551 	else
5552 		(*state)->timeout = PFTM_OTHER_SINGLE;
5553
5554 	/* translate source/destination address, if necessary */
5555 	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5556 		struct pf_state_key *nk = (*state)->key[pd->didx];
5557
5558 		KKASSERT(nk);
5559 		KKASSERT(pd);
5560 		KKASSERT(pd->src);
5561 		KKASSERT(pd->dst);
5562 		switch (pd->af) {
5563 #ifdef INET
5564 		case AF_INET:
5565 			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5566 				pf_change_a(&pd->src->v4.s_addr,
5567 				    pd->ip_sum,
5568 				    nk->addr[pd->sidx].v4.s_addr,
5569 				    0);
5570
5571
5572 			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5573 				pf_change_a(&pd->dst->v4.s_addr,
5574 				    pd->ip_sum,
5575 				    nk->addr[pd->didx].v4.s_addr,
5576 				    0);
5577
5578 			break;
5579 #endif /* INET */
5580 #ifdef INET6
5581 		case AF_INET6:
5582 			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
5583 				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5584
5585 			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
5586 				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5587 #endif /* INET6 */
5588 		}
5589 	}
5590
5591 	pfsync_update_state(*state);
5592 	lockmgr(&(*state)->lk, LK_RELEASE);
5593 	return (PF_PASS);
5594 }
5595
5596 /*
5597  * ipoff and off are measured from the start of the mbuf chain.
5598  * h must be at "ipoff" on the mbuf chain.
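 * On success the header is copied into the caller-supplied buffer p and
 * p is returned; on failure NULL is returned with *actionp and *reasonp
 * set (PF_PASS when a non-first fragment cannot contain the requested
 * header, PF_DROP otherwise).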
5599 */ 5600 void * 5601 pf_pull_hdr(struct mbuf *m, int off, void *p, int len, 5602 u_short *actionp, u_short *reasonp, sa_family_t af) 5603 { 5604 switch (af) { 5605 #ifdef INET 5606 case AF_INET: { 5607 struct ip *h = mtod(m, struct ip *); 5608 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3; 5609 5610 if (fragoff) { 5611 if (fragoff >= len) 5612 ACTION_SET(actionp, PF_PASS); 5613 else { 5614 ACTION_SET(actionp, PF_DROP); 5615 REASON_SET(reasonp, PFRES_FRAG); 5616 } 5617 return (NULL); 5618 } 5619 if (m->m_pkthdr.len < off + len || 5620 h->ip_len < off + len) { 5621 ACTION_SET(actionp, PF_DROP); 5622 REASON_SET(reasonp, PFRES_SHORT); 5623 return (NULL); 5624 } 5625 break; 5626 } 5627 #endif /* INET */ 5628 #ifdef INET6 5629 case AF_INET6: { 5630 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 5631 5632 if (m->m_pkthdr.len < off + len || 5633 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < 5634 (unsigned)(off + len)) { 5635 ACTION_SET(actionp, PF_DROP); 5636 REASON_SET(reasonp, PFRES_SHORT); 5637 return (NULL); 5638 } 5639 break; 5640 } 5641 #endif /* INET6 */ 5642 } 5643 m_copydata(m, off, len, p); 5644 return (p); 5645 } 5646 5647 int 5648 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif) 5649 { 5650 struct sockaddr_in *dst; 5651 int ret = 1; 5652 int check_mpath; 5653 #ifdef INET6 5654 struct sockaddr_in6 *dst6; 5655 struct route_in6 ro; 5656 #else 5657 struct route ro; 5658 #endif 5659 struct radix_node *rn; 5660 struct rtentry *rt; 5661 struct ifnet *ifp; 5662 5663 check_mpath = 0; 5664 bzero(&ro, sizeof(ro)); 5665 switch (af) { 5666 case AF_INET: 5667 dst = satosin(&ro.ro_dst); 5668 dst->sin_family = AF_INET; 5669 dst->sin_len = sizeof(*dst); 5670 dst->sin_addr = addr->v4; 5671 break; 5672 #ifdef INET6 5673 case AF_INET6: 5674 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5675 dst6->sin6_family = AF_INET6; 5676 dst6->sin6_len = sizeof(*dst6); 5677 dst6->sin6_addr = addr->v6; 5678 break; 5679 #endif /* INET6 */ 5680 default: 5681 return (0); 5682 } 5683 5684 /* Skip checks for ipsec interfaces */ 5685 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) 5686 goto out; 5687 5688 rtalloc_ign((struct route *)&ro, 0); 5689 5690 if (ro.ro_rt != NULL) { 5691 /* No interface given, this is a no-route check */ 5692 if (kif == NULL) 5693 goto out; 5694 5695 if (kif->pfik_ifp == NULL) { 5696 ret = 0; 5697 goto out; 5698 } 5699 5700 /* Perform uRPF check if passed input interface */ 5701 ret = 0; 5702 rn = (struct radix_node *)ro.ro_rt; 5703 do { 5704 rt = (struct rtentry *)rn; 5705 ifp = rt->rt_ifp; 5706 5707 if (kif->pfik_ifp == ifp) 5708 ret = 1; 5709 rn = NULL; 5710 } while (check_mpath == 1 && rn != NULL && ret == 0); 5711 } else 5712 ret = 0; 5713 out: 5714 if (ro.ro_rt != NULL) 5715 RTFREE(ro.ro_rt); 5716 return (ret); 5717 } 5718 5719 int 5720 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw) 5721 { 5722 struct sockaddr_in *dst; 5723 #ifdef INET6 5724 struct sockaddr_in6 *dst6; 5725 struct route_in6 ro; 5726 #else 5727 struct route ro; 5728 #endif 5729 int ret = 0; 5730 5731 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5732 5733 bzero(&ro, sizeof(ro)); 5734 switch (af) { 5735 case AF_INET: 5736 dst = satosin(&ro.ro_dst); 5737 dst->sin_family = AF_INET; 5738 dst->sin_len = sizeof(*dst); 5739 dst->sin_addr = addr->v4; 5740 break; 5741 #ifdef INET6 5742 case AF_INET6: 5743 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5744 dst6->sin6_family = AF_INET6; 5745 dst6->sin6_len = sizeof(*dst6); 5746 dst6->sin6_addr = addr->v6; 5747 break; 5748 #endif /* INET6 
*/ 5749 default: 5750 return (0); 5751 } 5752 5753 rtalloc_ign((struct route *)&ro, (RTF_CLONING | RTF_PRCLONING)); 5754 5755 if (ro.ro_rt != NULL) { 5756 RTFREE(ro.ro_rt); 5757 } 5758 5759 return (ret); 5760 } 5761 5762 #ifdef INET 5763 void 5764 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5765 struct pf_state *s, struct pf_pdesc *pd) 5766 { 5767 struct mbuf *m0, *m1; 5768 struct route iproute; 5769 struct route *ro = NULL; 5770 struct sockaddr_in *dst; 5771 struct ip *ip; 5772 struct ifnet *ifp = NULL; 5773 struct pf_addr naddr; 5774 struct pf_src_node *sn = NULL; 5775 int error = 0; 5776 int sw_csum; 5777 #ifdef IPSEC 5778 struct m_tag *mtag; 5779 #endif /* IPSEC */ 5780 5781 ASSERT_LWKT_TOKEN_HELD(&pf_token); 5782 5783 if (m == NULL || *m == NULL || r == NULL || 5784 (dir != PF_IN && dir != PF_OUT) || oifp == NULL) 5785 panic("pf_route: invalid parameters"); 5786 5787 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) { 5788 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED; 5789 (*m)->m_pkthdr.pf.routed = 1; 5790 } else { 5791 if ((*m)->m_pkthdr.pf.routed++ > 3) { 5792 m0 = *m; 5793 *m = NULL; 5794 goto bad; 5795 } 5796 } 5797 5798 if (r->rt == PF_DUPTO) { 5799 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL) { 5800 return; 5801 } 5802 } else { 5803 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5804 return; 5805 } 5806 m0 = *m; 5807 } 5808 5809 if (m0->m_len < sizeof(struct ip)) { 5810 DPFPRINTF(PF_DEBUG_URGENT, 5811 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5812 goto bad; 5813 } 5814 5815 ip = mtod(m0, struct ip *); 5816 5817 ro = &iproute; 5818 bzero((caddr_t)ro, sizeof(*ro)); 5819 dst = satosin(&ro->ro_dst); 5820 dst->sin_family = AF_INET; 5821 dst->sin_len = sizeof(*dst); 5822 dst->sin_addr = ip->ip_dst; 5823 5824 if (r->rt == PF_FASTROUTE) { 5825 rtalloc(ro); 5826 if (ro->ro_rt == 0) { 5827 ipstat.ips_noroute++; 5828 goto bad; 5829 } 5830 5831 ifp = ro->ro_rt->rt_ifp; 5832 ro->ro_rt->rt_use++; 5833 5834 if (ro->ro_rt->rt_flags & RTF_GATEWAY) 5835 dst = satosin(ro->ro_rt->rt_gateway); 5836 } else { 5837 if (TAILQ_EMPTY(&r->rpool.list)) { 5838 DPFPRINTF(PF_DEBUG_URGENT, 5839 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n")); 5840 goto bad; 5841 } 5842 if (s == NULL) { 5843 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, 5844 &naddr, NULL, &sn); 5845 if (!PF_AZERO(&naddr, AF_INET)) 5846 dst->sin_addr.s_addr = naddr.v4.s_addr; 5847 ifp = r->rpool.cur->kif ? 5848 r->rpool.cur->kif->pfik_ifp : NULL; 5849 } else { 5850 if (!PF_AZERO(&s->rt_addr, AF_INET)) 5851 dst->sin_addr.s_addr = 5852 s->rt_addr.v4.s_addr; 5853 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 5854 } 5855 } 5856 if (ifp == NULL) 5857 goto bad; 5858 5859 if (oifp != ifp) { 5860 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { 5861 goto bad; 5862 } else if (m0 == NULL) { 5863 goto done; 5864 } 5865 if (m0->m_len < sizeof(struct ip)) { 5866 DPFPRINTF(PF_DEBUG_URGENT, 5867 ("pf_route: m0->m_len < sizeof(struct ip)\n")); 5868 goto bad; 5869 } 5870 ip = mtod(m0, struct ip *); 5871 } 5872 5873 /* Copied from FreeBSD 5.1-CURRENT ip_output. */ 5874 m0->m_pkthdr.csum_flags |= CSUM_IP; 5875 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist; 5876 if (sw_csum & CSUM_DELAY_DATA) { 5877 in_delayed_cksum(m0); 5878 sw_csum &= ~CSUM_DELAY_DATA; 5879 } 5880 m0->m_pkthdr.csum_flags &= ifp->if_hwassist; 5881 m0->m_pkthdr.csum_iphlen = (ip->ip_hl << 2); 5882 5883 /* 5884 * WARNING! We cannot fragment if the packet was modified from an 5885 * original which expected to be using TSO. 
In this
5886 	 *	     situation we pray that the target interface is
5887 	 *	     compatible with the originating interface.
5888 	 */
5889 	if (ip->ip_len <= ifp->if_mtu ||
5890 	    (m0->m_pkthdr.csum_flags & CSUM_TSO) ||
5891 	    ((ifp->if_hwassist & CSUM_FRAGMENT) &&
5892 	    (ip->ip_off & IP_DF) == 0)) {
5893 		ip->ip_len = htons(ip->ip_len);
5894 		ip->ip_off = htons(ip->ip_off);
5895 		ip->ip_sum = 0;
5896 		if (sw_csum & CSUM_DELAY_IP) {
5897 			/* From KAME */
5898 			if (ip->ip_v == IPVERSION &&
5899 			    (ip->ip_hl << 2) == sizeof(*ip)) {
5900 				ip->ip_sum = in_cksum_hdr(ip);
5901 			} else {
5902 				ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5903 			}
5904 		}
5905 		lwkt_reltoken(&pf_token);
5906 		error = ifp->if_output(ifp, m0, sintosa(dst), ro->ro_rt);
5907 		lwkt_gettoken(&pf_token);
5908 		goto done;
5909 	}
5910
5911 	/*
5912 	 * Too large for interface; fragment if possible.
5913 	 * Must be able to put at least 8 bytes per fragment.
5914 	 */
5915 	if (ip->ip_off & IP_DF) {
5916 		ipstat.ips_cantfrag++;
5917 		if (r->rt != PF_DUPTO) {
5918 			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5919 			    ifp->if_mtu);
5920 			goto done;
5921 		} else
5922 			goto bad;
5923 	}
5924
5925 	m1 = m0;
5926 	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
5927 	if (error) {
5928 		goto bad;
5929 	}
5930
5931 	for (m0 = m1; m0; m0 = m1) {
5932 		m1 = m0->m_nextpkt;
5933 		m0->m_nextpkt = 0;
5934 		if (error == 0) {
5935 			lwkt_reltoken(&pf_token);
5936 			error = (*ifp->if_output)(ifp, m0, sintosa(dst),
5937 			    NULL);
5938 			lwkt_gettoken(&pf_token);
5939 		} else
5940 			m_freem(m0);
5941 	}
5942
5943 	if (error == 0)
5944 		ipstat.ips_fragmented++;
5945
5946 done:
5947 	if (r->rt != PF_DUPTO)
5948 		*m = NULL;
5949 	if (ro == &iproute && ro->ro_rt)
5950 		RTFREE(ro->ro_rt);
5951 	return;
5952
5953 bad:
5954 	m_freem(m0);
5955 	goto done;
5956 }
5957 #endif /* INET */
5958
5959 #ifdef INET6
5960 void
5961 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5962     struct pf_state *s, struct pf_pdesc *pd)
5963 {
5964 	struct mbuf *m0;
5965 	struct route_in6 ip6route;
5966 	struct route_in6 *ro;
5967 	struct sockaddr_in6 *dst;
5968 	struct ip6_hdr *ip6;
5969 	struct ifnet *ifp = NULL;
5970 	struct pf_addr naddr;
5971 	struct pf_src_node *sn = NULL;
5972
5973 	if (m == NULL || *m == NULL || r == NULL ||
5974 	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5975 		panic("pf_route6: invalid parameters");
5976
5977 	if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
5978 		(*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
5979 		(*m)->m_pkthdr.pf.routed = 1;
5980 	} else {
5981 		if ((*m)->m_pkthdr.pf.routed++ > 3) {
5982 			m0 = *m;
5983 			*m = NULL;
5984 			goto bad;
5985 		}
5986 	}
5987
5988 	if (r->rt == PF_DUPTO) {
5989 		if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL)
5990 			return;
5991 	} else {
5992 		if ((r->rt == PF_REPLYTO) == (r->direction == dir))
5993 			return;
5994 		m0 = *m;
5995 	}
5996
5997 	if (m0->m_len < sizeof(struct ip6_hdr)) {
5998 		DPFPRINTF(PF_DEBUG_URGENT,
5999 		    ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6000 		goto bad;
6001 	}
6002 	ip6 = mtod(m0, struct ip6_hdr *);
6003
6004 	ro = &ip6route;
6005 	bzero((caddr_t)ro, sizeof(*ro));
6006 	dst = (struct sockaddr_in6 *)&ro->ro_dst;
6007 	dst->sin6_family = AF_INET6;
6008 	dst->sin6_len = sizeof(*dst);
6009 	dst->sin6_addr = ip6->ip6_dst;
6010
6011 	/*
6012 	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
6013 	 * so make sure pf.flags is clear.
6014 	 *
6015 	 * Cheat. XXX why only in the v6 case???
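	 *
	 * A plausible answer: this fastroute path re-enters ip6_output(),
	 * which can run pf_test6() again, so the mbuf is tagged (with
	 * clean pf.flags) to avoid re-filtering; the IPv4 fastroute path
	 * hands the mbuf straight to ifp->if_output() instead.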
6016 	 */
6017 	if (r->rt == PF_FASTROUTE) {
6018 		m0->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
6019 		m0->m_pkthdr.pf.flags = 0;
6020 		/* XXX Re-Check when Upgrading to > 4.4 */
6021 		m0->m_pkthdr.pf.statekey = NULL;
6022 		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
6023 		return;
6024 	}
6025
6026 	if (TAILQ_EMPTY(&r->rpool.list)) {
6027 		DPFPRINTF(PF_DEBUG_URGENT,
6028 		    ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
6029 		goto bad;
6030 	}
6031 	if (s == NULL) {
6032 		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
6033 		    &naddr, NULL, &sn);
6034 		if (!PF_AZERO(&naddr, AF_INET6))
6035 			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6036 			    &naddr, AF_INET6);
6037 		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6038 	} else {
6039 		if (!PF_AZERO(&s->rt_addr, AF_INET6))
6040 			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
6041 			    &s->rt_addr, AF_INET6);
6042 		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6043 	}
6044 	if (ifp == NULL)
6045 		goto bad;
6046
6047 	if (oifp != ifp) {
6048 		if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
6049 			goto bad;
6050 		} else if (m0 == NULL) {
6051 			goto done;
6052 		}
6053 		if (m0->m_len < sizeof(struct ip6_hdr)) {
6054 			DPFPRINTF(PF_DEBUG_URGENT,
6055 			    ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
6056 			goto bad;
6057 		}
6058 		ip6 = mtod(m0, struct ip6_hdr *);
6059 	}
6060
6061 	/*
6062 	 * If the packet is too large for the outgoing interface,
6063 	 * send back an icmp6 error.
6064 	 */
6065 	if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr))
6066 		dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
6067 	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
6068 		nd6_output(ifp, ifp, m0, dst, NULL);
6069 	} else {
6070 		in6_ifstat_inc(ifp, ifs6_in_toobig);
6071 		if (r->rt != PF_DUPTO)
6072 			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6073 		else
6074 			goto bad;
6075 	}
6076
6077 done:
6078 	if (r->rt != PF_DUPTO)
6079 		*m = NULL;
6080 	return;
6081
6082 bad:
6083 	m_freem(m0);
6084 	goto done;
6085 }
6086 #endif /* INET6 */
6087
6088
6089 /*
6090  * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
6091  * off is the offset where the protocol header starts
6092  * len is the total length of protocol header plus payload
6093  * returns 0 when the checksum is valid, otherwise returns 1.
6094  */
6095 /*
6096  * XXX
6097  * FreeBSD supports cksum offload for the following drivers:
6098  * em(4), gx(4), lge(4), nge(4), ti(4), xl(4)
6099  * If we can make full use of it we would outperform ipfw/ipfilter in
6100  * very heavy traffic.
6101  * I have not tested it because I don't have NICs that support cksum offload.
6102  * (There might be problems. Typical phenomena would be
6103  * 1. No route message for UDP packet.
6104  * 2. No connection acceptance from external hosts regardless of rule set.)
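 *
 * With CSUM_DATA_VALID|CSUM_PSEUDO_HDR the driver has verified the full
 * checksum and csum_data holds it, so "sum ^= 0xffff" below is 0 exactly
 * when the checksum was good; with CSUM_DATA_VALID alone, csum_data
 * covers only the data and the pseudo header is folded in via
 * in_pseudo() first.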
int
pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
    sa_family_t af)
{
	u_int16_t sum = 0;
	int hw_assist = 0;
	struct ip *ip;

	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
		return (1);
	if (m->m_pkthdr.len < off + len)
		return (1);

	switch (p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
				sum = m->m_pkthdr.csum_data;
			} else {
				ip = mtod(m, struct ip *);
				sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + p));
			}
			sum ^= 0xffff;
			++hw_assist;
		}
		break;
	case IPPROTO_ICMP:
#ifdef INET6
	case IPPROTO_ICMPV6:
#endif /* INET6 */
		break;
	default:
		return (1);
	}

	if (!hw_assist) {
		switch (af) {
		case AF_INET:
			if (p == IPPROTO_ICMP) {
				if (m->m_len < off)
					return (1);
				m->m_data += off;
				m->m_len -= off;
				sum = in_cksum(m, len);
				m->m_data -= off;
				m->m_len += off;
			} else {
				if (m->m_len < sizeof(struct ip))
					return (1);
				sum = in_cksum_range(m, p, off, len);
				if (sum == 0) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID |
					     CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
			break;
#ifdef INET6
		case AF_INET6:
			if (m->m_len < sizeof(struct ip6_hdr))
				return (1);
			sum = in6_cksum(m, p, off, len);
			/*
			 * XXX
			 * IPv6 H/W cksum off-load not supported yet!
			 *
			 * if (sum == 0) {
			 *	m->m_pkthdr.csum_flags |=
			 *	    (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
			 *	m->m_pkthdr.csum_data = 0xffff;
			 * }
			 */
			break;
#endif /* INET6 */
		default:
			return (1);
		}
	}
	if (sum) {
		switch (p) {
		case IPPROTO_TCP:
			tcpstat.tcps_rcvbadsum++;
			break;
		case IPPROTO_UDP:
			udp_stat.udps_badsum++;
			break;
		case IPPROTO_ICMP:
			icmpstat.icps_checksum++;
			break;
#ifdef INET6
		case IPPROTO_ICMPV6:
			icmp6stat.icp6s_checksum++;
			break;
#endif /* INET6 */
		}
		return (1);
	}
	return (0);
}

struct pf_divert *
pf_find_divert(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
		return (NULL);

	return ((struct pf_divert *)(mtag + 1));
}

struct pf_divert *
pf_get_divert(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
		mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert),
		    M_NOWAIT);
		if (mtag == NULL)
			return (NULL);
		bzero(mtag + 1, sizeof(struct pf_divert));
		m_tag_prepend(m, mtag);
	}

	return ((struct pf_divert *)(mtag + 1));
}
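
/*
 * pf_test() is the main IPv4 inspection entry point.  Note that it can
 * replace *m0 (normalization/reassembly) or free it outright (synproxy,
 * route-to), so callers must re-check *m0 when it returns.
 */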
#ifdef INET

/*
 * WARNING: pf_token held shared on entry, THIS IS CPU LOCALIZED CODE
 */
int
pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct inpcb *inp)
{
	struct pfi_kif *kif;
	u_short action, reason = 0, log = 0;
	struct mbuf *m = *m0;
	struct ip *h = NULL;
	struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state *s = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_pdesc pd;
	int off, dirndx;
#ifdef ALTQ
	int pqid = 0;
#endif

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
#ifdef foo
	if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
		kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
	else
#endif
	kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test");
#endif /* DIAGNOSTIC */

	if (m->m_pkthdr.len < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
		return (PF_PASS);
	m->m_pkthdr.pf.flags = 0;
	/* Re-Check when updating to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip *);
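
	/*
	 * From here on, "off" is the offset of the transport header and pd
	 * describes the packet for the matching code; pd.src/pd.dst point
	 * directly into the IP header, so translations rewrite the packet
	 * in place.
	 */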
	off = h->ip_hl << 2;
	if (off < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = &h->ip_sum;
	pd.proto_sum = NULL;
	pd.proto = h->ip_p;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET;
	pd.tos = h->ip_tos;
	pd.tot_len = h->ip_len;
	pd.eh = eh;

	/* handle fragments that didn't get reassembled by normalization */
	if (h->ip_off & (IP_MF | IP_OFFMASK)) {
		action = pf_test_fragment(&r, dir, kif, m, h,
		    &pd, &a, &ruleset);
		goto done;
	}

	switch (h->ip_p) {

	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
#ifdef ALTQ
		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
			pqid = 1;
#endif
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a,
			    &ruleset, NULL, inp);
		}
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a,
			    &ruleset, NULL, inp);
		}
		break;
	}

	case IPPROTO_ICMP: {
		struct icmp ih;

		pd.hdr.icmp = &ih;
		if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a,
			    &ruleset, NULL, inp);
		}
		break;
	}

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, NULL, inp);
		}
		break;
	}

done:
	if (action == PF_PASS && h->ip_hl > 5 &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with ip options\n"));
	}

	if ((s && s->tag) || r->rtableid)
		pf_tag_packet(m, s ? s->tag : 0, r->rtableid);

#if 0
	if (dir == PF_IN && s && s->key[PF_SK_STACK])
		m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#endif
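
	/*
	 * ALTQ classification: pure ACKs (pqid, set in the TCP case above)
	 * and IPTOS_LOWDELAY traffic are assigned to the rule's priority
	 * queue, everything else to its regular queue.
	 */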
#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		if (pqid || (pd.tos & IPTOS_LOWDELAY))
			m->m_pkthdr.pf.qid = r->pqid;
		else
			m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = AF_INET;
		m->m_pkthdr.pf.hdr = h;
		/* add connection hash for fairq */
		if (s) {
			/* for fairq */
			m->m_pkthdr.pf.state_hash = s->hash;
			m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
		}
	}
#endif /* ALTQ */

	/*
	 * connections redirected to loopback should not match sockets
	 * bound specifically to loopback due to security implications,
	 * see tcp_input() and in_pcblookup_listen().
	 */
	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	     s->nat_rule.ptr->action == PF_BINAT) &&
	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
		m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (dir == PF_IN && action == PF_PASS && r->divert.port) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(m))) {
			m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->port = r->divert.port;
			divert->addr.ipv4 = r->divert.addr.v4;
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_OUT)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->
				addr[(s->direction == PF_IN)],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}
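
	/*
	 * PF_SYNPROXY_DROP means pf completed the TCP handshake itself, so
	 * the original segment is consumed here while PF_PASS is reported.
	 * For route-to/reply-to/dup-to rules (r->rt), pf_route() takes over
	 * delivery and may free the mbuf, leaving *m0 NULL.
	 */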
	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route can free the mbuf causing *m0 to become NULL */
		pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET */

#ifdef INET6

/*
 * WARNING: pf_token held shared on entry, THIS IS CPU LOCALIZED CODE
 */
int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct inpcb *inp)
{
	struct pfi_kif *kif;
	u_short action, reason = 0, log = 0;
	struct mbuf *m = *m0, *n = NULL;
	struct ip6_hdr *h = NULL;
	struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state *s = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_pdesc pd;
	int off, terminal = 0, dirndx, rh_cnt = 0;

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof(pd));
#ifdef foo
	if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
		kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
	else
#endif
	kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test6");
#endif /* DIAGNOSTIC */

	if (m->m_pkthdr.len < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/*
	 * DragonFly doesn't zero the auxiliary pkthdr fields, only fw_flags,
	 * so make sure pf.flags is clear.
	 */
	if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
		return (PF_PASS);
	m->m_pkthdr.pf.flags = 0;
	/* Re-Check when updating to > 4.4 */
	m->m_pkthdr.pf.statekey = NULL;

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip6_hdr *);

#if 1
	/*
	 * We do not support jumbograms yet.  If we keep going, a zero
	 * ip6_plen will do something bad, so drop the packet for now.
	 */
	if (htons(h->ip6_plen) == 0) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
		goto done;
	}
#endif

	pd.src = (struct pf_addr *)&h->ip6_src;
	pd.dst = (struct pf_addr *)&h->ip6_dst;
	pd.sport = pd.dport = NULL;
	pd.ip_sum = NULL;
	pd.proto_sum = NULL;
	pd.dir = dir;
	pd.sidx = (dir == PF_IN) ? 0 : 1;
	pd.didx = (dir == PF_IN) ? 1 : 0;
	pd.af = AF_INET6;
	pd.tos = 0;
	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	pd.eh = eh;
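
	/*
	 * Walk the extension header chain to find the transport protocol.
	 * Fragment headers divert to pf_test_fragment(), more than one
	 * routing header (or a type 0 routing header) is rejected, and
	 * AH/HOPOPTS/DSTOPTS are skipped over; note that AH's length field
	 * counts 32-bit words while the others count 64-bit units, hence
	 * the two different offset computations below.
	 */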
	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
	pd.proto = h->ip6_nxt;
	do {
		switch (pd.proto) {
		case IPPROTO_FRAGMENT:
			action = pf_test_fragment(&r, dir, kif, m, h,
			    &pd, &a, &ruleset);
			if (action == PF_DROP)
				REASON_SET(&reason, PFRES_FRAG);
			goto done;
		case IPPROTO_ROUTING: {
			struct ip6_rthdr rthdr;

			if (rh_cnt++) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 more than one rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
			    &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				log = 1;
				goto done;
			}
			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 rthdr0\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			/* FALLTHROUGH */
		}
		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext opt6;

			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH)
				off += (opt6.ip6e_len + 2) * 4;
			else
				off += (opt6.ip6e_len + 1) * 8;
			pd.proto = opt6.ip6e_nxt;
			/* go to the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);

	/* if there's no routing header, use unmodified mbuf for checksumming */
	if (!n)
		n = m;

	switch (pd.proto) {

	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof(th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
		if (action == PF_PASS) {
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a,
			    &ruleset, NULL, inp);
		}
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
		if (action == PF_PASS) {
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a,
			    &ruleset, NULL, inp);
		}
		break;
	}
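
	/*
	 * ICMPv6 follows the same pattern as TCP/UDP: match existing state
	 * first (pf_test_state_icmp() also handles errors that refer to
	 * other states), then fall back to a full ruleset evaluation.
	 */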
	case IPPROTO_ICMPV6: {
		struct icmp6_hdr ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif,
		    m, off, h, &pd, &reason);
		if (action == PF_PASS) {
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a,
			    &ruleset, NULL, inp);
		}
		break;
	}

	default:
		action = pf_test_state_other(&s, dir, kif, m, &pd);
		if (action == PF_PASS) {
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, NULL, inp);
		}
		break;
	}

done:
	if (n != m) {
		m_freem(n);
		n = NULL;
	}

	/* handle dangerous IPv6 extension headers. */
	if (action == PF_PASS && rh_cnt &&
	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with dangerous v6 headers\n"));
	}

	if ((s && s->tag) || r->rtableid)
		pf_tag_packet(m, s ? s->tag : 0, r->rtableid);

#if 0
	if (dir == PF_IN && s && s->key[PF_SK_STACK])
		m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#endif

#ifdef ALTQ
	if (action == PF_PASS && r->qid) {
		m->m_pkthdr.fw_flags |= PF_MBUF_STRUCTURE;
		if (pd.tos & IPTOS_LOWDELAY)
			m->m_pkthdr.pf.qid = r->pqid;
		else
			m->m_pkthdr.pf.qid = r->qid;
		m->m_pkthdr.pf.ecn_af = AF_INET6;
		m->m_pkthdr.pf.hdr = h;
		if (s) {
			/* for fairq */
			m->m_pkthdr.pf.state_hash = s->hash;
			m->m_pkthdr.pf.flags |= PF_TAG_STATE_HASHED;
		}
	}
#endif /* ALTQ */

	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	     s->nat_rule.ptr->action == PF_BINAT) &&
	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
		m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (dir == PF_IN && action == PF_PASS && r->divert.port) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(m))) {
			m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->port = r->divert.port;
			divert->addr.ipv6 = r->divert.addr.v6;
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == s->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
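		/*
		 * Table statistics: if only the default rule matched but a
		 * NAT rule translated the packet, credit that NAT rule's
		 * tables instead (tr), so the counters follow the rule that
		 * actually acted on the packet.
		 */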
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL && r == &pf_default_rule)
			tr = nr;
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl,
			    (s == NULL) ? pd.src :
			    &s->key[(s->direction == PF_IN)]->addr[0],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl,
			    (s == NULL) ? pd.dst :
			    &s->key[(s->direction == PF_IN)]->addr[1],
			    pd.af, pd.tot_len, dir == PF_OUT,
			    r->action == PF_PASS, tr->dst.neg);
	}

	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route6 can free the mbuf causing *m0 to become NULL */
		pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
#endif /* INET6 */

/*
 * Congestion feedback stub: always reports "no congestion" to callers.
 */
int
pf_check_congestion(struct ifqueue *ifq)
{
	return (0);
}