/*	$NetBSD: ip_frag.c,v 1.8 2020/04/05 02:51:34 christos Exp $	*/

/*
 * Copyright (C) 2012 by Darren Reed.
 *
 * See the IPFILTER.LICENCE file for details on licencing.
 */
#if defined(KERNEL) || defined(_KERNEL)
# undef KERNEL
# undef _KERNEL
# define	KERNEL	1
# define	_KERNEL	1
#endif
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/file.h>
#ifdef __hpux
# include <sys/timeout.h>
#endif
#if !defined(_KERNEL)
# include <stdio.h>
# include <string.h>
# include <stdlib.h>
# define _KERNEL
# ifdef __OpenBSD__
struct file;
# endif
# include <sys/uio.h>
# undef _KERNEL
#endif
#if defined(_KERNEL) && \
    defined(__FreeBSD_version) && (__FreeBSD_version >= 220000)
# include <sys/filio.h>
# include <sys/fcntl.h>
#else
# include <sys/ioctl.h>
#endif
#if !defined(linux)
# include <sys/protosw.h>
#endif
#include <sys/socket.h>
#if defined(_KERNEL)
# include <sys/systm.h>
# if !defined(__SVR4) && !defined(__svr4__)
#  include <sys/mbuf.h>
# endif
#endif
#if !defined(__SVR4) && !defined(__svr4__)
# if defined(_KERNEL) && !defined(__sgi) && !defined(AIX)
#  include <sys/kernel.h>
# endif
#else
# include <sys/byteorder.h>
# ifdef _KERNEL
#  include <sys/dditypes.h>
# endif
# include <sys/stream.h>
# include <sys/kmem.h>
#endif
#include <net/if.h>
#ifdef sun
# include <net/af.h>
#endif
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#if !defined(linux)
# include <netinet/ip_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include "netinet/ip_compat.h"
#include "netinet/ip_fil.h"
#include "netinet/ip_nat.h"
#include "netinet/ip_frag.h"
#include "netinet/ip_state.h"
#include "netinet/ip_auth.h"
#include "netinet/ip_lookup.h"
#include "netinet/ip_proxy.h"
#include "netinet/ip_sync.h"
/* END OF INCLUDES */

#if !defined(lint)
#if defined(__NetBSD__)
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_frag.c,v 1.8 2020/04/05 02:51:34 christos Exp $");
#else
static const char sccsid[] = "@(#)ip_frag.c	1.11 3/24/96 (C) 1993-2000 Darren Reed";
static const char rcsid[] = "@(#)Id: ip_frag.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
#endif
#endif


typedef struct ipf_frag_softc_s {
	ipfrwlock_t	ipfr_ipidfrag;
	ipfrwlock_t	ipfr_frag;
	ipfrwlock_t	ipfr_natfrag;
	int		ipfr_size;
	int		ipfr_ttl;
	int		ipfr_lock;
	int		ipfr_inited;
	ipfr_t		*ipfr_list;
	ipfr_t		**ipfr_tail;
	ipfr_t		*ipfr_natlist;
	ipfr_t		**ipfr_nattail;
	ipfr_t		*ipfr_ipidlist;
	ipfr_t		**ipfr_ipidtail;
	ipfr_t		**ipfr_heads;
	ipfr_t		**ipfr_nattab;
	ipfr_t		**ipfr_ipidtab;
	ipfrstat_t	ipfr_stats;
} ipf_frag_softc_t;


#ifdef USE_MUTEXES
static ipfr_t *ipfr_frag_new(ipf_main_softc_t *, ipf_frag_softc_t *,
			     fr_info_t *, u_32_t, ipfr_t **,
			     ipfrwlock_t *);
static ipfr_t *ipf_frag_lookup(ipf_main_softc_t *, ipf_frag_softc_t *,
			       fr_info_t *, ipfr_t **, ipfrwlock_t *);
static void ipf_frag_deref(void *, ipfr_t **, ipfrwlock_t *);
static int ipf_frag_next(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
			 ipfr_t **, ipfrwlock_t *);
#else
static ipfr_t *ipfr_frag_new(ipf_main_softc_t *, ipf_frag_softc_t *,
			     fr_info_t *, u_32_t, ipfr_t **);
static ipfr_t *ipf_frag_lookup(ipf_main_softc_t *, ipf_frag_softc_t *,
			       fr_info_t *, ipfr_t **);
static void ipf_frag_deref(void *, ipfr_t **);
static int ipf_frag_next(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
			 ipfr_t **);
#endif
static void ipf_frag_delete(ipf_main_softc_t *, ipfr_t *, ipfr_t ***);
static void ipf_frag_free(ipf_frag_softc_t *, ipfr_t *);

static frentry_t ipfr_block;

#define	FBUMP(x)	softf->ipfr_stats.x++
#define	FBUMPD(x)	do { softf->ipfr_stats.x++; DT(x); } while (0)


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_main_load */
/* Returns:     int - 0 == success, -1 == error */
/* Parameters:  Nil */
/* */
/* Initialise the filter rule associated with blocked packets - everyone can */
/* use it. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_main_load(void)
{
	bzero((char *)&ipfr_block, sizeof(ipfr_block));
	ipfr_block.fr_flags = FR_BLOCK|FR_QUICK;
	ipfr_block.fr_ref = 1;

	return 0;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_main_unload */
/* Returns:     int - 0 == success, -1 == error */
/* Parameters:  Nil */
/* */
/* A no-op function that exists as a placeholder so that the flow in */
/* other functions is obvious. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_main_unload(void)
{
	return 0;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_soft_create */
/* Returns:     void * - NULL = failure, else pointer to local context */
/* Parameters:  softc(I) - pointer to soft context main structure */
/* */
/* Allocate a new soft context structure to track fragment related info. */
/* ------------------------------------------------------------------------ */
/*ARGSUSED*/
void *
ipf_frag_soft_create(ipf_main_softc_t *softc)
{
	ipf_frag_softc_t *softf;

	KMALLOC(softf, ipf_frag_softc_t *);
	if (softf == NULL)
		return NULL;

	bzero((char *)softf, sizeof(*softf));

	RWLOCK_INIT(&softf->ipfr_ipidfrag, "frag ipid lock");
	RWLOCK_INIT(&softf->ipfr_frag, "ipf fragment rwlock");
	RWLOCK_INIT(&softf->ipfr_natfrag, "ipf NAT fragment rwlock");

	softf->ipfr_size = IPFT_SIZE;
	softf->ipfr_ttl = IPF_TTLVAL(60);
	softf->ipfr_lock = 1;
	softf->ipfr_tail = &softf->ipfr_list;
	softf->ipfr_nattail = &softf->ipfr_natlist;
	softf->ipfr_ipidtail = &softf->ipfr_ipidlist;

	return softf;
}
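

/*
 * Illustrative only (not compiled): a minimal sketch of how the soft
 * context functions in this file are expected to be sequenced by a caller
 * such as the main ipf module.  The function name and the simple error
 * handling below are hypothetical.
 */
#if 0
static int
example_frag_lifecycle(ipf_main_softc_t *softc)
{
	void *arg;

	arg = ipf_frag_soft_create(softc);	/* allocate locks + context */
	if (arg == NULL)
		return -1;
	if (ipf_frag_soft_init(softc, arg) != 0) {	/* hash tables */
		ipf_frag_soft_destroy(softc, arg);
		return -1;
	}
	/* ... the fragment cache is now usable ... */
	ipf_frag_soft_fini(softc, arg);		/* flush entries, free tables */
	ipf_frag_soft_destroy(softc, arg);	/* destroy locks, free context */
	return 0;
}
#endif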


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_soft_destroy */
/* Returns:     Nil */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              arg(I)   - pointer to local context to use */
/* */
/* Destroy the locks and free up the local context used for tracking */
/* fragment related information. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_soft_destroy(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	RW_DESTROY(&softf->ipfr_ipidfrag);
	RW_DESTROY(&softf->ipfr_frag);
	RW_DESTROY(&softf->ipfr_natfrag);

	KFREE(softf);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_soft_init */
/* Returns:     int - 0 == success, -1 == error */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              arg(I)   - pointer to local context to use */
/* */
/* Initialise the hash tables for the fragment cache lookups. */
/* ------------------------------------------------------------------------ */
/*ARGSUSED*/
int
ipf_frag_soft_init(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	KMALLOCS(softf->ipfr_heads, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_heads == NULL)
		return -1;

	bzero((char *)softf->ipfr_heads, softf->ipfr_size * sizeof(ipfr_t *));

	KMALLOCS(softf->ipfr_nattab, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_nattab == NULL)
		return -2;

	bzero((char *)softf->ipfr_nattab, softf->ipfr_size * sizeof(ipfr_t *));

	KMALLOCS(softf->ipfr_ipidtab, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_ipidtab == NULL)
		return -3;

	bzero((char *)softf->ipfr_ipidtab,
	      softf->ipfr_size * sizeof(ipfr_t *));

	softf->ipfr_lock = 0;
	softf->ipfr_inited = 1;

	return 0;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_soft_fini */
/* Returns:     int - 0 == success, -1 == error */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              arg(I)   - pointer to local context to use */
/* */
/* Free all memory allocated whilst running and from initialisation. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_soft_fini(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	softf->ipfr_lock = 1;

	if (softf->ipfr_inited == 1) {
		ipf_frag_clear(softc);

		softf->ipfr_inited = 0;
	}

	if (softf->ipfr_heads != NULL)
		KFREES(softf->ipfr_heads,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_heads = NULL;

	if (softf->ipfr_nattab != NULL)
		KFREES(softf->ipfr_nattab,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_nattab = NULL;

	if (softf->ipfr_ipidtab != NULL)
		KFREES(softf->ipfr_ipidtab,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_ipidtab = NULL;

	return 0;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_setlock */
/* Returns:     Nil */
/* Parameters:  arg(I) - pointer to local context to use */
/*              tmp(I) - new value for lock */
/* */
/* Stub function that allows for external manipulation of ipfr_lock */
/* ------------------------------------------------------------------------ */
void
ipf_frag_setlock(void *arg, int tmp)
{
	ipf_frag_softc_t *softf = arg;

	softf->ipfr_lock = tmp;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_stats */
/* Returns:     ipfrstat_t* - pointer to struct with current frag stats */
/* Parameters:  arg(I) - pointer to local context to use */
/* */
/* Updates ipfr_stats with current information and returns a pointer to it */
/* ------------------------------------------------------------------------ */
ipfrstat_t *
ipf_frag_stats(void *arg)
{
	ipf_frag_softc_t *softf = arg;

	softf->ipfr_stats.ifs_table = softf->ipfr_heads;
	softf->ipfr_stats.ifs_nattab = softf->ipfr_nattab;
	return &softf->ipfr_stats;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipfr_frag_new */
/* Returns:     ipfr_t * - pointer to fragment cache state info or NULL */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              softf(I) - pointer to fragment cache context */
/*              fin(I)   - pointer to packet information */
/*              pass(I)  - result of the filter lookup for this packet */
/*              table(I) - pointer to frag table to add to */
/*              lock(I)  - pointer to lock to get a write hold of */
/* */
/* Add a new entry to the fragment cache, registering it as having come */
/* through this box, with the result of the filter operation. */
/* */
/* If this function succeeds, it returns with a write lock held on "lock". */
/* If it fails, no lock is held on return. */
/* ------------------------------------------------------------------------ */
static ipfr_t *
ipfr_frag_new(
	ipf_main_softc_t *softc,
	ipf_frag_softc_t *softf,
	fr_info_t *fin,
	u_32_t pass,
	ipfr_t *table[]
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
)
{
	ipfr_t *fra, frag, *fran;
	u_int idx, off;
	frentry_t *fr;

	if (softf->ipfr_stats.ifs_inuse >= softf->ipfr_size) {
		FBUMPD(ifs_maximum);
		return NULL;
	}

	if ((fin->fin_flx & (FI_FRAG|FI_BAD)) != FI_FRAG) {
		FBUMPD(ifs_newbad);
		return NULL;
	}

	if (pass & FR_FRSTRICT) {
		if (fin->fin_off != 0) {
			FBUMPD(ifs_newrestrictnot0);
			return NULL;
		}
	}

	memset(&frag, 0, sizeof(frag));
	frag.ipfr_v = fin->fin_v;
	idx = fin->fin_v;
	frag.ipfr_p = fin->fin_p;
	idx += fin->fin_p;
	frag.ipfr_id = fin->fin_id;
	idx += fin->fin_id;
	frag.ipfr_source = fin->fin_fi.fi_src;
	idx += frag.ipfr_src.s_addr;
	frag.ipfr_dest = fin->fin_fi.fi_dst;
	idx += frag.ipfr_dst.s_addr;
	frag.ipfr_ifp = fin->fin_ifp;
	idx *= 127;
	idx %= softf->ipfr_size;

	frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
	frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
	frag.ipfr_auth = fin->fin_fi.fi_auth;

	off = fin->fin_off >> 3;
	if (off == 0) {
		char *ptr;
		int end;

#ifdef USE_INET6
		if (fin->fin_v == 6) {

			ptr = (char *)fin->fin_fraghdr +
			      sizeof(struct ip6_frag);
		} else
#endif
		{
			ptr = fin->fin_dp;
		}
		end = fin->fin_plen - (ptr - (char *)fin->fin_ip);
		frag.ipfr_firstend = end >> 3;
	} else {
		frag.ipfr_firstend = 0;
	}

	/*
	 * Allocate some memory, if possible.  If not, just record that we
	 * failed to do so.
	 */
	KMALLOC(fran, ipfr_t *);
	if (fran == NULL) {
		FBUMPD(ifs_nomem);
		return NULL;
	}
	memset(fran, 0, sizeof(*fran));

	WRITE_ENTER(lock);

	/*
	 * First, make sure it isn't already there...
	 */
	for (fra = table[idx]; (fra != NULL); fra = fra->ipfr_hnext)
		if (!bcmp((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp,
			  IPFR_CMPSZ)) {
			RWLOCK_EXIT(lock);
			FBUMPD(ifs_exists);
			KFREE(fran);
			return NULL;
		}

	fra = fran;
	fran = NULL;
	fr = fin->fin_fr;
	fra->ipfr_rule = fr;
	if (fr != NULL) {
		MUTEX_ENTER(&fr->fr_lock);
		fr->fr_ref++;
		MUTEX_EXIT(&fr->fr_lock);
	}

	/*
	 * Insert the fragment into the fragment table, copy the struct used
	 * in the search using bcopy rather than reassigning each field.
	 * Set the ttl to the default.
	 */
	if ((fra->ipfr_hnext = table[idx]) != NULL)
		table[idx]->ipfr_hprev = &fra->ipfr_hnext;
	fra->ipfr_hprev = table + idx;
	fra->ipfr_data = NULL;
	table[idx] = fra;
	bcopy((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp, IPFR_CMPSZ);
	fra->ipfr_v = fin->fin_v;
	fra->ipfr_p = fin->fin_p;
	fra->ipfr_ttl = softc->ipf_ticks + softf->ipfr_ttl;
	fra->ipfr_firstend = frag.ipfr_firstend;

	/*
	 * Compute the offset of the expected start of the next packet.
	 */
	if (off == 0)
		fra->ipfr_seen0 = 1;
	fra->ipfr_off = off + (fin->fin_dlen >> 3);
	fra->ipfr_pass = pass;
	fra->ipfr_ref = 1;
	fra->ipfr_pkts = 1;
	fra->ipfr_bytes = fin->fin_plen;
	FBUMP(ifs_inuse);
	FBUMP(ifs_new);
	return fra;
}
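

/*
 * Illustrative only (not compiled): the hash used by ipfr_frag_new() above
 * and ipf_frag_lookup() below folds the IP version, protocol, IP id and
 * both addresses into a table index.  A standalone sketch of the same
 * computation, written as one expression over plain integer inputs; the
 * function name and parameter list are hypothetical.
 */
#if 0
static u_int
example_frag_hash(u_int v, u_int p, u_int id, u_32_t src, u_32_t dst,
	int tabsize)
{
	u_int idx;

	idx = v + p + id + src + dst;
	idx *= 127;		/* spread adjacent IP ids across buckets */
	return idx % tabsize;	/* tabsize corresponds to softf->ipfr_size */
}
#endif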


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_new */
/* Returns:     int - 0 == success, -1 == error */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              fin(I)   - pointer to packet information */
/*              pass(I)  - result of the filter lookup for this packet */
/* */
/* Add a new entry to the fragment cache table based on the current packet */
/* ------------------------------------------------------------------------ */
int
ipf_frag_new(ipf_main_softc_t *softc, fr_info_t *fin, u_32_t pass)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t	*fra;

	if (softf->ipfr_lock != 0)
		return -1;

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads,
			    &softc->ipf_frag);
#else
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads);
#endif
	if (fra != NULL) {
		*softf->ipfr_tail = fra;
		fra->ipfr_prev = softf->ipfr_tail;
		softf->ipfr_tail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softc->ipf_frag);
	}
	return fra ? 0 : -1;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_natnew */
/* Returns:     int - 0 == success, -1 == error */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              fin(I)   - pointer to packet information */
/*              pass(I)  - result of the filter lookup for this packet */
/*              nat(I)   - pointer to NAT structure */
/* */
/* Create a new NAT fragment cache entry based on the current packet and */
/* the NAT structure for this "session". */
/* ------------------------------------------------------------------------ */
int
ipf_frag_natnew(ipf_main_softc_t *softc, fr_info_t *fin, u_32_t pass,
	nat_t *nat)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t	*fra;

	if (softf->ipfr_lock != 0)
		return 0;

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab,
			    &softf->ipfr_natfrag);
#else
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab);
#endif
	if (fra != NULL) {
		fra->ipfr_data = nat;
		nat->nat_data = fra;
		*softf->ipfr_nattail = fra;
		fra->ipfr_prev = softf->ipfr_nattail;
		softf->ipfr_nattail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softf->ipfr_natfrag);
		return 0;
	}
	return -1;
}
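

/*
 * Illustrative only (not compiled): ipf_frag_natnew() above links the
 * fragment cache entry and the NAT entry to each other (fra->ipfr_data and
 * nat->nat_data).  Whenever either side is torn down, both pointers have
 * to be cleared, which is what ipf_frag_natforget(), ipf_frag_clear() and
 * ipf_frag_expire() later in this file do.  A minimal sketch of that
 * unlink step; the helper name is hypothetical.
 */
#if 0
static void
example_frag_nat_unlink(ipfr_t *fra)
{
	nat_t *nat = fra->ipfr_data;

	if (nat != NULL && nat->nat_data == fra)
		nat->nat_data = NULL;	/* NAT entry no longer points at us */
	fra->ipfr_data = NULL;		/* drop our pointer to the NAT entry */
}
#endif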


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_ipidnew */
/* Returns:     int - 0 == success, -1 == error */
/* Parameters:  fin(I)  - pointer to packet information */
/*              ipid(I) - new IP ID for this fragmented packet */
/* */
/* Create a new fragment cache entry for this packet and store, as a data */
/* pointer, the new IP ID value. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_ipidnew(fr_info_t *fin, u_32_t ipid)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t	*fra;

	if (softf->ipfr_lock)
		return 0;

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab,
			    &softf->ipfr_ipidfrag);
#else
	fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab);
#endif
	if (fra != NULL) {
		fra->ipfr_data = (void *)(intptr_t)ipid;
		*softf->ipfr_ipidtail = fra;
		fra->ipfr_prev = softf->ipfr_ipidtail;
		softf->ipfr_ipidtail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softf->ipfr_ipidfrag);
	}
	return fra ? 0 : -1;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_lookup */
/* Returns:     ipfr_t * - pointer to ipfr_t structure if there's a */
/*                         matching entry in the frag table, else NULL */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              softf(I) - pointer to fragment cache context */
/*              fin(I)   - pointer to packet information */
/*              table(I) - pointer to fragment cache table to search */
/*              lock(I)  - pointer to lock to get a read hold of */
/* */
/* Check the fragment cache to see if there is already a record of this */
/* packet with its filter result known. */
/* */
/* If this function succeeds, it returns with a read lock held on "lock". */
/* If it fails, no lock is held on return. */
/* ------------------------------------------------------------------------ */
static ipfr_t *
ipf_frag_lookup(
	ipf_main_softc_t *softc,
	ipf_frag_softc_t *softf,
	fr_info_t *fin,
	ipfr_t *table[]
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
)
{
	ipfr_t *f, frag;
	u_int idx;

	/*
	 * We don't want to let short packets match because they could be
	 * compromising the security of other rules that want to match on
	 * layer 4 fields (and can't because they have been fragmented off.)
	 * Why do this check here?  The counter acts as an indicator of this
	 * kind of attack, whereas if it was elsewhere, it wouldn't know if
	 * other matching packets had been seen.
	 */
	if (fin->fin_flx & FI_SHORT) {
		FBUMPD(ifs_short);
		return NULL;
	}

	if ((fin->fin_flx & FI_BAD) != 0) {
		FBUMPD(ifs_bad);
		return NULL;
	}

	/*
	 * For fragments, we record protocol, packet id, TOS and both IP#'s
	 * (these should all be the same for all fragments of a packet).
	 *
	 * Build up a hash value to index the table with.
	 */
	memset(&frag, 0, sizeof(frag));
	frag.ipfr_v = fin->fin_v;
	idx = fin->fin_v;
	frag.ipfr_p = fin->fin_p;
	idx += fin->fin_p;
	frag.ipfr_id = fin->fin_id;
	idx += fin->fin_id;
	frag.ipfr_source = fin->fin_fi.fi_src;
	idx += frag.ipfr_src.s_addr;
	frag.ipfr_dest = fin->fin_fi.fi_dst;
	idx += frag.ipfr_dst.s_addr;
	frag.ipfr_ifp = fin->fin_ifp;
	idx *= 127;
	idx %= softf->ipfr_size;

	frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
	frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
	frag.ipfr_auth = fin->fin_fi.fi_auth;

	READ_ENTER(lock);

	/*
	 * Check the table, careful to only compare the right amount of data.
	 */
	for (f = table[idx]; f; f = f->ipfr_hnext) {
		if (!bcmp((char *)&frag.ipfr_ifp, (char *)&f->ipfr_ifp,
			  IPFR_CMPSZ)) {
			u_short	off;

			/*
			 * XXX - We really need to be guarding against the
			 * retransmission of (src,dst,id,offset-range) here
			 * because a fragmented packet is never resent with
			 * the same IP ID# (or shouldn't).
			 */
			off = fin->fin_off >> 3;
			if (f->ipfr_seen0) {
				if (off == 0) {
					FBUMPD(ifs_retrans0);
					continue;
				}

				/*
				 * Case 3. See comment for frpr_fragment6.
				 */
				if ((f->ipfr_firstend != 0) &&
				    (off < f->ipfr_firstend)) {
					FBUMP(ifs_overlap);
					DT2(ifs_overlap, u_short, off,
					    ipfr_t *, f);
					fin->fin_flx |= FI_BAD;
					break;
				}
			} else if (off == 0)
				f->ipfr_seen0 = 1;

#if 0
			/* We can't do this, since we only have a read lock! */
			if (f != table[idx]) {
				ipfr_t **fp;

				/*
				 * Move fragment info. to the top of the list
				 * to speed up searches.  First, delink...
				 */
				fp = f->ipfr_hprev;
				(*fp) = f->ipfr_hnext;
				if (f->ipfr_hnext != NULL)
					f->ipfr_hnext->ipfr_hprev = fp;
				/*
				 * Then put back at the top of the chain.
				 */
				f->ipfr_hnext = table[idx];
				table[idx]->ipfr_hprev = &f->ipfr_hnext;
				f->ipfr_hprev = table + idx;
				table[idx] = f;
			}
#endif

			/*
			 * If we've followed the fragments, and this is the
			 * last (in order), shrink expiration time.
			 */
			if (off == f->ipfr_off) {
				f->ipfr_off = (fin->fin_dlen >> 3) + off;

				/*
				 * Well, we could shrink the expiration time
				 * but only if every fragment has been seen
				 * in order up to this, the last.  ipfr_badorder
				 * is used here to count those out of order
				 * and if it equals 0 when we get to the last
				 * fragment then we can assume all of the
				 * fragments have been seen and in order.
				 */
#if 0
				/*
				 * Doing this properly requires moving it to
				 * the head of the list which is infeasible.
				 */
				if ((more == 0) && (f->ipfr_badorder == 0))
					f->ipfr_ttl = softc->ipf_ticks + 1;
#endif
			} else {
				f->ipfr_badorder++;
				FBUMPD(ifs_unordered);
				if (f->ipfr_pass & FR_FRSTRICT) {
					FBUMPD(ifs_strict);
					continue;
				}
			}
			f->ipfr_pkts++;
			f->ipfr_bytes += fin->fin_plen;
			FBUMP(ifs_hits);
			return f;
		}
	}

	RWLOCK_EXIT(lock);
	FBUMP(ifs_miss);
	return NULL;
}
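

/*
 * Illustrative only: a worked example of the offset bookkeeping done by
 * ipfr_frag_new() and ipf_frag_lookup() above, using hypothetical numbers.
 * All offsets are kept in 8-byte units, which is why the code shifts by 3.
 *
 *   1st fragment: fin_off = 0, 1480 bytes of payload
 *	-> ipfr_seen0 = 1, ipfr_firstend = 1480 >> 3 = 185,
 *	   ipfr_off = 0 + (1480 >> 3) = 185 (expected start of the next one)
 *   2nd fragment: fin_off = 1480 -> off = 185 == ipfr_off, i.e. in order,
 *	-> ipfr_off becomes 185 + (this fragment's fin_dlen >> 3)
 *   a fragment with 0 < off < ipfr_firstend (e.g. off = 100) overlaps the
 *	first fragment already seen, so it is counted in ifs_overlap and the
 *	packet is marked FI_BAD.
 */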


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_natknown */
/* Returns:     nat_t* - pointer to 'parent' NAT structure if frag table */
/*                       match found, else NULL */
/* Parameters:  fin(I) - pointer to packet information */
/* */
/* Functional interface for NAT lookups of the NAT fragment cache */
/* ------------------------------------------------------------------------ */
nat_t *
ipf_frag_natknown(fr_info_t *fin)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	nat_t	*nat;
	ipfr_t	*ipf;

	if ((softf->ipfr_lock) || !softf->ipfr_natlist)
		return NULL;
#ifdef USE_MUTEXES
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab,
			      &softf->ipfr_natfrag);
#else
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab);
#endif
	if (ipf != NULL) {
		nat = ipf->ipfr_data;
		/*
		 * This is the last fragment for this packet.
		 */
		if ((ipf->ipfr_ttl == softc->ipf_ticks + 1) && (nat != NULL)) {
			nat->nat_data = NULL;
			ipf->ipfr_data = NULL;
		}
		RWLOCK_EXIT(&softf->ipfr_natfrag);
	} else
		nat = NULL;
	return nat;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_ipidknown */
/* Returns:     u_32_t - IPv4 ID for this packet if match found, else */
/*                       return 0xffffffff to indicate no match. */
/* Parameters:  fin(I) - pointer to packet information */
/* */
/* Functional interface for IP ID lookups of the IP ID fragment cache */
/* ------------------------------------------------------------------------ */
u_32_t
ipf_frag_ipidknown(fr_info_t *fin)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t	*ipf;
	u_32_t	id;

	if (softf->ipfr_lock || !softf->ipfr_ipidlist)
		return 0xffffffff;

#ifdef USE_MUTEXES
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab,
			      &softf->ipfr_ipidfrag);
#else
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab);
#endif
	if (ipf != NULL) {
		id = (u_32_t)(intptr_t)ipf->ipfr_data;
		RWLOCK_EXIT(&softf->ipfr_ipidfrag);
	} else
		id = 0xffffffff;
	return id;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_known */
/* Returns:     frentry_t* - pointer to filter rule if a match is found in */
/*                           the frag cache table, else NULL. */
/* Parameters:  fin(I)   - pointer to packet information */
/*              passp(O) - pointer to where to store rule flags returned */
/* */
/* Functional interface for normal lookups of the fragment cache.  If a */
/* match is found, return the rule pointer and flags from the rule, except */
/* that if FR_LOGFIRST is set, reset FR_LOG. */
/* ------------------------------------------------------------------------ */
frentry_t *
ipf_frag_known(fr_info_t *fin, u_32_t *passp)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	frentry_t *fr = NULL;
	ipfr_t	*fra;
	u_32_t	pass;

	if ((softf->ipfr_lock) || (softf->ipfr_list == NULL))
		return NULL;

#ifdef USE_MUTEXES
	fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads,
			      &softc->ipf_frag);
#else
	fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads);
#endif
	if (fra != NULL) {
		if (fin->fin_flx & FI_BAD) {
			fr = &ipfr_block;
			fin->fin_reason = FRB_BADFRAG;
		} else {
			fr = fra->ipfr_rule;
		}
		fin->fin_fr = fr;
		if (fr != NULL) {
			pass = fr->fr_flags;
			if ((pass & FR_KEEPSTATE) != 0) {
				fin->fin_flx |= FI_STATE;
				/*
				 * Reset the keep state flag here so that we
				 * don't try and add a new state entry because
				 * of a match here.  That leads to blocking of
				 * the packet later because the add fails.
				 */
				pass &= ~FR_KEEPSTATE;
			}
			if ((pass & FR_LOGFIRST) != 0)
				pass &= ~(FR_LOGFIRST|FR_LOG);
			*passp = pass;
		}
		RWLOCK_EXIT(&softc->ipf_frag);
	}
	return fr;
}
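

/*
 * Illustrative only (not compiled): a minimal sketch of how the packet
 * path might consult the fragment cache through ipf_frag_known() above.
 * The helper name is hypothetical; in the real code this is done from the
 * main filtering routine elsewhere in IPFilter.
 */
#if 0
static frentry_t *
example_check_frag_cache(fr_info_t *fin, u_32_t *passp)
{
	frentry_t *fr;

	fr = ipf_frag_known(fin, passp);
	if (fr != NULL) {
		/*
		 * A match means an earlier fragment of this packet already
		 * went through rule matching; reuse its verdict (returned
		 * in *passp) instead of walking the rule list again.
		 */
		return fr;
	}
	return NULL;	/* no cached verdict, do a full rule walk */
}
#endif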


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_natforget */
/* Returns:     Nil */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              ptr(I)   - pointer to data structure */
/* */
/* Search through all of the fragment cache entries for NAT and wherever a */
/* pointer is found to match ptr, reset it to NULL. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_natforget(ipf_main_softc_t *softc, void *ptr)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t	*fr;

	WRITE_ENTER(&softf->ipfr_natfrag);
	for (fr = softf->ipfr_natlist; fr; fr = fr->ipfr_next)
		if (fr->ipfr_data == ptr)
			fr->ipfr_data = NULL;
	RWLOCK_EXIT(&softf->ipfr_natfrag);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_delete */
/* Returns:     Nil */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              fra(I)   - pointer to fragment structure to delete */
/*              tail(IO) - pointer to the pointer to the tail of the frag */
/*                         list */
/* */
/* Remove a fragment cache table entry from the table & list.  Also free */
/* the filter rule it is associated with if it is no longer used as a */
/* result of decreasing the reference count. */
/* ------------------------------------------------------------------------ */
static void
ipf_frag_delete(ipf_main_softc_t *softc, ipfr_t *fra, ipfr_t ***tail)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

	if (fra->ipfr_next)
		fra->ipfr_next->ipfr_prev = fra->ipfr_prev;
	*fra->ipfr_prev = fra->ipfr_next;
	if (*tail == &fra->ipfr_next)
		*tail = fra->ipfr_prev;

	if (fra->ipfr_hnext)
		fra->ipfr_hnext->ipfr_hprev = fra->ipfr_hprev;
	*fra->ipfr_hprev = fra->ipfr_hnext;

	if (fra->ipfr_rule != NULL) {
		(void) ipf_derefrule(softc, &fra->ipfr_rule);
	}

	if (fra->ipfr_ref <= 0)
		ipf_frag_free(softf, fra);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_free */
/* Returns:     Nil */
/* Parameters:  softf(I) - pointer to fragment cache context */
/*              fra(I)   - pointer to fragment structure to free */
/* */
/* Free up a fragment cache entry and update the statistics accordingly. */
/* ------------------------------------------------------------------------ */
static void
ipf_frag_free(ipf_frag_softc_t *softf, ipfr_t *fra)
{
	KFREE(fra);
	FBUMP(ifs_expire);
	softf->ipfr_stats.ifs_inuse--;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_clear */
/* Returns:     Nil */
/* Parameters:  softc(I) - pointer to soft context main structure */
/* */
/* Free memory in use by the fragment state information that is kept.  Do */
/* the normal fragment state stuff first and then the NAT-fragment table. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_clear(ipf_main_softc_t *softc)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t	*fra;
	nat_t	*nat;

	WRITE_ENTER(&softc->ipf_frag);
	while ((fra = softf->ipfr_list) != NULL) {
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_tail);
	}
	softf->ipfr_tail = &softf->ipfr_list;
	RWLOCK_EXIT(&softc->ipf_frag);

	WRITE_ENTER(&softc->ipf_nat);
	WRITE_ENTER(&softf->ipfr_natfrag);
	while ((fra = softf->ipfr_natlist) != NULL) {
		nat = fra->ipfr_data;
		if (nat != NULL) {
			if (nat->nat_data == fra)
				nat->nat_data = NULL;
		}
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_nattail);
	}
	softf->ipfr_nattail = &softf->ipfr_natlist;
	RWLOCK_EXIT(&softf->ipfr_natfrag);
	RWLOCK_EXIT(&softc->ipf_nat);
}
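

/*
 * Illustrative only: how the ttl fields drive the expiry check used by
 * ipf_frag_expire() below.  ipfr_frag_new() stamps each entry with
 *	fra->ipfr_ttl = softc->ipf_ticks + softf->ipfr_ttl;
 * where softf->ipfr_ttl defaults to IPF_TTLVAL(60), i.e. 60 seconds
 * expressed in ipf_ticks units.  An entry is due for expiry once
 *	fra->ipfr_ttl <= softc->ipf_ticks
 * and because new entries are always appended at the tail of each list,
 * ttl values only grow along a list, which is why the loops below can
 * stop at the first entry that is not yet due.
 */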
/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_expire */
/* Returns:     Nil */
/* Parameters:  softc(I) - pointer to soft context main structure */
/* */
/* Expire entries in the fragment cache table that have been there too long */
/* ------------------------------------------------------------------------ */
void
ipf_frag_expire(ipf_main_softc_t *softc)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t	**fp, *fra;
	nat_t	*nat;
	SPL_INT(s);

	if (softf->ipfr_lock)
		return;

	SPL_NET(s);
	WRITE_ENTER(&softc->ipf_frag);
	/*
	 * Go through the entire table, looking for entries to expire,
	 * which is indicated by the ttl being less than or equal to
	 * ipf_ticks.
	 */
	for (fp = &softf->ipfr_list; ((fra = *fp) != NULL); ) {
		if (fra->ipfr_ttl > softc->ipf_ticks)
			break;
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_tail);
	}
	RWLOCK_EXIT(&softc->ipf_frag);

	WRITE_ENTER(&softf->ipfr_ipidfrag);
	for (fp = &softf->ipfr_ipidlist; ((fra = *fp) != NULL); ) {
		if (fra->ipfr_ttl > softc->ipf_ticks)
			break;
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_ipidtail);
	}
	RWLOCK_EXIT(&softf->ipfr_ipidfrag);

	/*
	 * Same again for the NAT table, except that if the structure also
	 * still points to a NAT structure, and the NAT structure points back
	 * at the one to be free'd, NULL the reference from the NAT struct.
	 * NOTE: We need to grab both locks early, and in this order, so as
	 * to prevent a deadlock if both try to expire at the same time.
	 * The extra if() statement here is because it locks out all NAT
	 * operations - no need to do that if there are no entries in this
	 * list, right?
	 */
	if (softf->ipfr_natlist != NULL) {
		WRITE_ENTER(&softc->ipf_nat);
		WRITE_ENTER(&softf->ipfr_natfrag);
		for (fp = &softf->ipfr_natlist; ((fra = *fp) != NULL); ) {
			if (fra->ipfr_ttl > softc->ipf_ticks)
				break;
			nat = fra->ipfr_data;
			if (nat != NULL) {
				if (nat->nat_data == fra)
					nat->nat_data = NULL;
			}
			fra->ipfr_ref--;
			ipf_frag_delete(softc, fra, &softf->ipfr_nattail);
		}
		RWLOCK_EXIT(&softf->ipfr_natfrag);
		RWLOCK_EXIT(&softc->ipf_nat);
	}
	SPL_X(s);
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_pkt_next */
/* Returns:     int - 0 == success, else error */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              token(I) - pointer to token information for this caller */
/*              itp(I)   - pointer to generic iterator from caller */
/* */
/* Wrapper around ipf_frag_next() for the normal fragment cache list. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_pkt_next(ipf_main_softc_t *softc, ipftoken_t *token,
	ipfgeniter_t *itp)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

#ifdef USE_MUTEXES
	return ipf_frag_next(softc, token, itp, &softf->ipfr_list,
			     &softf->ipfr_frag);
#else
	return ipf_frag_next(softc, token, itp, &softf->ipfr_list);
#endif
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_nat_next */
/* Returns:     int - 0 == success, else error */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              token(I) - pointer to token information for this caller */
/*              itp(I)   - pointer to generic iterator from caller */
/* */
/* Wrapper around ipf_frag_next() for the NAT fragment cache list. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_nat_next(ipf_main_softc_t *softc, ipftoken_t *token,
	ipfgeniter_t *itp)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

#ifdef USE_MUTEXES
	return ipf_frag_next(softc, token, itp, &softf->ipfr_natlist,
			     &softf->ipfr_natfrag);
#else
	return ipf_frag_next(softc, token, itp, &softf->ipfr_natlist);
#endif
}

/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_next */
/* Returns:     int - 0 == success, else error */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              token(I) - pointer to token information for this caller */
/*              itp(I)   - pointer to generic iterator from caller */
/*              top(I)   - top of the fragment list */
/*              lock(I)  - fragment cache lock */
/* */
/* This function is used to iterate through the list of entries in the */
/* fragment cache.  It increases the reference count on the one currently */
/* being returned so that the caller can come back and resume from it later.*/
/* */
/* This function is used for both the NAT fragment cache as well as the ipf */
/* fragment cache - hence the reason for passing in top and lock. */
/* ------------------------------------------------------------------------ */
static int
ipf_frag_next(
	ipf_main_softc_t *softc,
	ipftoken_t *token,
	ipfgeniter_t *itp,
	ipfr_t **top
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
)
{
	ipfr_t *frag, *next, zero;
	int error = 0;

	if (itp->igi_data == NULL) {
		IPFERROR(20001);
		return EFAULT;
	}

	if (itp->igi_nitems != 1) {
		IPFERROR(20003);
		return EFAULT;
	}

	frag = token->ipt_data;

	READ_ENTER(lock);

	if (frag == NULL)
		next = *top;
	else
		next = frag->ipfr_next;

	if (next != NULL) {
		ATOMIC_INC(next->ipfr_ref);
		token->ipt_data = next;
	} else {
		bzero(&zero, sizeof(zero));
		next = &zero;
		token->ipt_data = NULL;
	}
	if (next->ipfr_next == NULL)
		ipf_token_mark_complete(token);

	RWLOCK_EXIT(lock);

	error = COPYOUT(next, itp->igi_data, sizeof(*next));
	if (error != 0)
		IPFERROR(20002);

	if (frag != NULL) {
#ifdef USE_MUTEXES
		ipf_frag_deref(softc->ipf_frag_soft, &frag, lock);
#else
		ipf_frag_deref(softc->ipf_frag_soft, &frag);
#endif
	}
	return error;
}
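

/*
 * Illustrative only (not compiled): a minimal sketch of how a caller
 * holding an ipftoken_t might walk the fragment list with the iterator
 * above.  Each call copies one entry out and leaves a reference on it so
 * the walk can resume there later; the loop ends once the token has been
 * marked complete.  The helper name and the way the token and iterator are
 * obtained are hypothetical - in practice this is driven from the generic
 * iterator ioctl handling elsewhere in IPFilter.
 */
#if 0
static int
example_walk_frag_list(ipf_main_softc_t *softc, ipftoken_t *token,
	ipfgeniter_t *itp)
{
	int error;

	do {
		error = ipf_frag_pkt_next(softc, token, itp);
		if (error != 0)
			break;
		/* itp->igi_data now holds a copy of the next ipfr_t */
	} while (token->ipt_complete == 0);

	return error;
}
#endif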


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_pkt_deref */
/* Returns:     Nil */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              data(I)  - pointer to the ipfr_t pointer to drop */
/* */
/* Drop a reference held on an entry in the normal fragment cache list. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_pkt_deref(ipf_main_softc_t *softc, void *data)
{
	ipfr_t **frp = data;

#ifdef USE_MUTEXES
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

	ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_frag);
#else
	ipf_frag_deref(softc->ipf_frag_soft, frp);
#endif
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_nat_deref */
/* Returns:     Nil */
/* Parameters:  softc(I) - pointer to soft context main structure */
/*              data(I)  - pointer to the ipfr_t pointer to drop */
/* */
/* Drop a reference held on an entry in the NAT fragment cache list. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_nat_deref(ipf_main_softc_t *softc, void *data)
{
	ipfr_t **frp = data;

#ifdef USE_MUTEXES
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

	ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_natfrag);
#else
	ipf_frag_deref(softc->ipf_frag_soft, frp);
#endif
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_deref */
/* Returns:     Nil */
/* Parameters:  arg(I)  - pointer to fragment cache context */
/*              frp(IO) - pointer to fragment structure to dereference */
/*              lock(I) - lock associated with the fragment */
/* */
/* This function dereferences a fragment structure (ipfr_t).  The pointer */
/* passed in will always be reset back to NULL, even if the structure is */
/* not freed, to enforce the notion that the caller is no longer entitled */
/* to use the pointer it is dropping the reference to. */
/* ------------------------------------------------------------------------ */
static void
ipf_frag_deref(void *arg, ipfr_t **frp
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
)
{
	ipf_frag_softc_t *softf = arg;
	ipfr_t *fra;

	fra = *frp;
	*frp = NULL;

	WRITE_ENTER(lock);
	fra->ipfr_ref--;
	if (fra->ipfr_ref <= 0)
		ipf_frag_free(softf, fra);
	RWLOCK_EXIT(lock);
}