1 /* $NetBSD: ip_input.c,v 1.286 2010/04/01 01:23:32 tls Exp $ */ 2 3 /* 4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the project nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 /*- 33 * Copyright (c) 1998 The NetBSD Foundation, Inc. 34 * All rights reserved. 35 * 36 * This code is derived from software contributed to The NetBSD Foundation 37 * by Public Access Networks Corporation ("Panix"). It was developed under 38 * contract to Panix by Eric Haszlakiewicz and Thor Lancelot Simon. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 59 * POSSIBILITY OF SUCH DAMAGE. 60 */ 61 62 /* 63 * Copyright (c) 1982, 1986, 1988, 1993 64 * The Regents of the University of California. All rights reserved. 
65 * 66 * Redistribution and use in source and binary forms, with or without 67 * modification, are permitted provided that the following conditions 68 * are met: 69 * 1. Redistributions of source code must retain the above copyright 70 * notice, this list of conditions and the following disclaimer. 71 * 2. Redistributions in binary form must reproduce the above copyright 72 * notice, this list of conditions and the following disclaimer in the 73 * documentation and/or other materials provided with the distribution. 74 * 3. Neither the name of the University nor the names of its contributors 75 * may be used to endorse or promote products derived from this software 76 * without specific prior written permission. 77 * 78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 88 * SUCH DAMAGE. 89 * 90 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 91 */ 92 93 #include <sys/cdefs.h> 94 __KERNEL_RCSID(0, "$NetBSD: ip_input.c,v 1.286 2010/04/01 01:23:32 tls Exp $"); 95 96 #include "opt_inet.h" 97 #include "opt_compat_netbsd.h" 98 #include "opt_gateway.h" 99 #include "opt_pfil_hooks.h" 100 #include "opt_ipsec.h" 101 #include "opt_mrouting.h" 102 #include "opt_mbuftrace.h" 103 #include "opt_inet_csum.h" 104 105 #include <sys/param.h> 106 #include <sys/systm.h> 107 #include <sys/malloc.h> 108 #include <sys/mbuf.h> 109 #include <sys/domain.h> 110 #include <sys/protosw.h> 111 #include <sys/socket.h> 112 #include <sys/socketvar.h> 113 #include <sys/errno.h> 114 #include <sys/time.h> 115 #include <sys/kernel.h> 116 #include <sys/pool.h> 117 #include <sys/sysctl.h> 118 #include <sys/kauth.h> 119 120 #include <net/if.h> 121 #include <net/if_dl.h> 122 #include <net/route.h> 123 #include <net/pfil.h> 124 125 #include <netinet/in.h> 126 #include <netinet/in_systm.h> 127 #include <netinet/ip.h> 128 #include <netinet/in_pcb.h> 129 #include <netinet/in_proto.h> 130 #include <netinet/in_var.h> 131 #include <netinet/ip_var.h> 132 #include <netinet/ip_private.h> 133 #include <netinet/ip_icmp.h> 134 /* just for gif_ttl */ 135 #include <netinet/in_gif.h> 136 #include "gif.h" 137 #include <net/if_gre.h> 138 #include "gre.h" 139 140 #ifdef MROUTING 141 #include <netinet/ip_mroute.h> 142 #endif 143 144 #ifdef IPSEC 145 #include <netinet6/ipsec.h> 146 #include <netinet6/ipsec_private.h> 147 #include <netkey/key.h> 148 #endif 149 #ifdef FAST_IPSEC 150 #include <netipsec/ipsec.h> 151 #include <netipsec/key.h> 152 #endif /* FAST_IPSEC*/ 153 154 #ifndef IPFORWARDING 155 #ifdef GATEWAY 156 #define IPFORWARDING 1 /* forward IP packets not for us */ 157 #else /* GATEWAY */ 158 #define IPFORWARDING 0 /* don't forward IP packets not for us */ 159 #endif /* GATEWAY */ 160 #endif /* IPFORWARDING */ 161 #ifndef IPSENDREDIRECTS 162 #define IPSENDREDIRECTS 1 163 #endif 164 #ifndef IPFORWSRCRT 165 #define IPFORWSRCRT 1 /* forward 
source-routed packets */ 166 #endif 167 #ifndef IPALLOWSRCRT 168 #define IPALLOWSRCRT 1 /* allow source-routed packets */ 169 #endif 170 #ifndef IPMTUDISC 171 #define IPMTUDISC 1 172 #endif 173 #ifndef IPMTUDISCTIMEOUT 174 #define IPMTUDISCTIMEOUT (10 * 60) /* as per RFC 1191 */ 175 #endif 176 177 #ifdef COMPAT_50 178 #include <compat/sys/time.h> 179 #include <compat/sys/socket.h> 180 #endif 181 182 /* 183 * Note: DIRECTED_BROADCAST is handled this way so that previous 184 * configuration using this option will Just Work. 185 */ 186 #ifndef IPDIRECTEDBCAST 187 #ifdef DIRECTED_BROADCAST 188 #define IPDIRECTEDBCAST 1 189 #else 190 #define IPDIRECTEDBCAST 0 191 #endif /* DIRECTED_BROADCAST */ 192 #endif /* IPDIRECTEDBCAST */ 193 int ipforwarding = IPFORWARDING; 194 int ipsendredirects = IPSENDREDIRECTS; 195 int ip_defttl = IPDEFTTL; 196 int ip_forwsrcrt = IPFORWSRCRT; 197 int ip_directedbcast = IPDIRECTEDBCAST; 198 int ip_allowsrcrt = IPALLOWSRCRT; 199 int ip_mtudisc = IPMTUDISC; 200 int ip_mtudisc_timeout = IPMTUDISCTIMEOUT; 201 #ifdef DIAGNOSTIC 202 int ipprintfs = 0; 203 #endif 204 205 int ip_do_randomid = 0; 206 207 /* 208 * XXX - Setting ip_checkinterface mostly implements the receive side of 209 * the Strong ES model described in RFC 1122, but since the routing table 210 * and transmit implementation do not implement the Strong ES model, 211 * setting this to 1 results in an odd hybrid. 212 * 213 * XXX - ip_checkinterface currently must be disabled if you use ipnat 214 * to translate the destination address to another local interface. 215 * 216 * XXX - ip_checkinterface must be disabled if you add IP aliases 217 * to the loopback interface instead of the interface where the 218 * packets for those addresses are received. 219 */ 220 int ip_checkinterface = 0; 221 222 223 struct rttimer_queue *ip_mtudisc_timeout_q = NULL; 224 225 int ipqmaxlen = IFQ_MAXLEN; 226 u_long in_ifaddrhash; /* size of hash table - 1 */ 227 int in_ifaddrentries; /* total number of addrs */ 228 struct in_ifaddrhead in_ifaddrhead; 229 struct in_ifaddrhashhead *in_ifaddrhashtbl; 230 u_long in_multihash; /* size of hash table - 1 */ 231 int in_multientries; /* total number of addrs */ 232 struct in_multihashhead *in_multihashtbl; 233 struct ifqueue ipintrq; 234 235 uint16_t ip_id; 236 237 percpu_t *ipstat_percpu; 238 239 #ifdef PFIL_HOOKS 240 struct pfil_head inet_pfil_hook; 241 #endif 242 243 /* 244 * Cached copy of nmbclusters. If nbclusters is different, 245 * recalculate IP parameters derived from nmbclusters. 246 */ 247 static int ip_nmbclusters; /* copy of nmbclusters */ 248 static void ip_nmbclusters_changed(void); /* recalc limits */ 249 250 #define CHECK_NMBCLUSTER_PARAMS() \ 251 do { \ 252 if (__predict_false(ip_nmbclusters != nmbclusters)) \ 253 ip_nmbclusters_changed(); \ 254 } while (/*CONSTCOND*/0) 255 256 /* IP datagram reassembly queues (hashed) */ 257 #define IPREASS_NHASH_LOG2 6 258 #define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2) 259 #define IPREASS_HMASK (IPREASS_NHASH - 1) 260 #define IPREASS_HASH(x,y) \ 261 (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK) 262 struct ipqhead ipq[IPREASS_NHASH]; 263 int ipq_locked; 264 static int ip_nfragpackets; /* packets in reass queue */ 265 static int ip_nfrags; /* total fragments in reass queues */ 266 267 int ip_maxfragpackets = 200; /* limit on packets. XXX sysctl */ 268 int ip_maxfrags; /* limit on fragments. 
XXX sysctl */
269
270
271 /*
272  * Additive-Increase/Multiplicative-Decrease (AIMD) strategy for
273  * IP reassembly queue buffer management.
274  *
275  * We keep a count of total IP fragments (NB: not fragmented packets!)
276  * awaiting reassembly (ip_nfrags) and a limit (ip_maxfrags) on fragments.
277  * If ip_nfrags exceeds the ip_maxfrags limit, we drop half the
278  * total fragments in the reassembly queues. This AIMD policy avoids
279  * repeatedly deleting single packets under heavy fragmentation load
280  * (e.g., from lossy NFS peers).
281  */
282 static u_int ip_reass_ttl_decr(u_int ticks);
283 static void ip_reass_drophalf(void);
284
285
286 static inline int ipq_lock_try(void);
287 static inline void ipq_unlock(void);
288
289 static inline int
290 ipq_lock_try(void)
291 {
292 	int s;
293
294 	/*
295 	 * Use splvm() -- we're blocking things that would cause
296 	 * mbuf allocation.
297 	 */
298 	s = splvm();
299 	if (ipq_locked) {
300 		splx(s);
301 		return (0);
302 	}
303 	ipq_locked = 1;
304 	splx(s);
305 	return (1);
306 }
307
308 static inline void
309 ipq_unlock(void)
310 {
311 	int s;
312
313 	s = splvm();
314 	ipq_locked = 0;
315 	splx(s);
316 }
317
318 #ifdef DIAGNOSTIC
319 #define IPQ_LOCK() \
320 do { \
321 	if (ipq_lock_try() == 0) { \
322 		printf("%s:%d: ipq already locked\n", __FILE__, __LINE__); \
323 		panic("ipq_lock"); \
324 	} \
325 } while (/*CONSTCOND*/ 0)
326 #define IPQ_LOCK_CHECK() \
327 do { \
328 	if (ipq_locked == 0) { \
329 		printf("%s:%d: ipq lock not held\n", __FILE__, __LINE__); \
330 		panic("ipq lock check"); \
331 	} \
332 } while (/*CONSTCOND*/ 0)
333 #else
334 #define IPQ_LOCK() (void) ipq_lock_try()
335 #define IPQ_LOCK_CHECK() /* nothing */
336 #endif
337
338 #define IPQ_UNLOCK() ipq_unlock()
339
340 struct pool inmulti_pool;
341 struct pool ipqent_pool;
342
343 #ifdef INET_CSUM_COUNTERS
344 #include <sys/device.h>
345
346 struct evcnt ip_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
347     NULL, "inet", "hwcsum bad");
348 struct evcnt ip_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
349     NULL, "inet", "hwcsum ok");
350 struct evcnt ip_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
351     NULL, "inet", "swcsum");
352
353 #define INET_CSUM_COUNTER_INCR(ev) (ev)->ev_count++
354
355 EVCNT_ATTACH_STATIC(ip_hwcsum_bad);
356 EVCNT_ATTACH_STATIC(ip_hwcsum_ok);
357 EVCNT_ATTACH_STATIC(ip_swcsum);
358
359 #else
360
361 #define INET_CSUM_COUNTER_INCR(ev) /* nothing */
362
363 #endif /* INET_CSUM_COUNTERS */
364
365 /*
366  * We need to save the IP options in case a protocol wants to respond
367  * to an incoming packet over the same route if the packet got here
368  * using IP source routing. This allows connection establishment and
369  * maintenance when the remote end is on a network that is not known
370  * to us.
371  */
372 int ip_nhops = 0;
373 static struct ip_srcrt {
374 	struct in_addr dst;		/* final destination */
375 	char nop;			/* one NOP to align */
376 	char srcopt[IPOPT_OFFSET + 1];	/* OPTVAL, OLEN and OFFSET */
377 	struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)];
378 } ip_srcrt;
379
380 static void save_rte(u_char *, struct in_addr);
381
382 #ifdef MBUFTRACE
383 struct mowner ip_rx_mowner = MOWNER_INIT("internet", "rx");
384 struct mowner ip_tx_mowner = MOWNER_INIT("internet", "tx");
385 #endif
386
387 static void sysctl_net_inet_ip_setup(struct sysctllog **);
388
389 /*
390  * Compute IP limits derived from the value of nmbclusters.
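 *
 * Illustrative arithmetic for the function below: with nmbclusters at,
 * say, 1024, ip_maxfrags comes out as 1024 / 4 = 256, i.e. the
 * reassembly queues may hold at most a quarter as many fragments as
 * there are mbuf clusters configured in the system.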
391 */ 392 static void 393 ip_nmbclusters_changed(void) 394 { 395 ip_maxfrags = nmbclusters / 4; 396 ip_nmbclusters = nmbclusters; 397 } 398 399 /* 400 * IP initialization: fill in IP protocol switch table. 401 * All protocols not implemented in kernel go to raw IP protocol handler. 402 */ 403 void 404 ip_init(void) 405 { 406 const struct protosw *pr; 407 int i; 408 409 sysctl_net_inet_ip_setup(NULL); 410 411 pool_init(&inmulti_pool, sizeof(struct in_multi), 0, 0, 0, "inmltpl", 412 NULL, IPL_SOFTNET); 413 pool_init(&ipqent_pool, sizeof(struct ipqent), 0, 0, 0, "ipqepl", 414 NULL, IPL_VM); 415 416 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 417 if (pr == 0) 418 panic("ip_init"); 419 for (i = 0; i < IPPROTO_MAX; i++) 420 ip_protox[i] = pr - inetsw; 421 for (pr = inetdomain.dom_protosw; 422 pr < inetdomain.dom_protoswNPROTOSW; pr++) 423 if (pr->pr_domain->dom_family == PF_INET && 424 pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) 425 ip_protox[pr->pr_protocol] = pr - inetsw; 426 427 for (i = 0; i < IPREASS_NHASH; i++) 428 LIST_INIT(&ipq[i]); 429 430 ip_initid(); 431 ip_id = time_second & 0xfffff; 432 433 ipintrq.ifq_maxlen = ipqmaxlen; 434 ip_nmbclusters_changed(); 435 436 TAILQ_INIT(&in_ifaddrhead); 437 in_ifaddrhashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true, 438 &in_ifaddrhash); 439 in_multihashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true, 440 &in_multihash); 441 ip_mtudisc_timeout_q = rt_timer_queue_create(ip_mtudisc_timeout); 442 #ifdef GATEWAY 443 ipflow_init(ip_hashsize); 444 #endif 445 446 #ifdef PFIL_HOOKS 447 /* Register our Packet Filter hook. */ 448 inet_pfil_hook.ph_type = PFIL_TYPE_AF; 449 inet_pfil_hook.ph_af = AF_INET; 450 i = pfil_head_register(&inet_pfil_hook); 451 if (i != 0) 452 printf("ip_init: WARNING: unable to register pfil hook, " 453 "error %d\n", i); 454 #endif /* PFIL_HOOKS */ 455 456 #ifdef MBUFTRACE 457 MOWNER_ATTACH(&ip_tx_mowner); 458 MOWNER_ATTACH(&ip_rx_mowner); 459 #endif /* MBUFTRACE */ 460 461 ipstat_percpu = percpu_alloc(sizeof(uint64_t) * IP_NSTATS); 462 } 463 464 struct sockaddr_in ipaddr = { 465 .sin_len = sizeof(ipaddr), 466 .sin_family = AF_INET, 467 }; 468 struct route ipforward_rt; 469 470 /* 471 * IP software interrupt routine 472 */ 473 void 474 ipintr(void) 475 { 476 int s; 477 struct mbuf *m; 478 struct ifqueue lcl_intrq; 479 480 memset(&lcl_intrq, 0, sizeof(lcl_intrq)); 481 ipintrq.ifq_maxlen = ipqmaxlen; 482 483 mutex_enter(softnet_lock); 484 KERNEL_LOCK(1, NULL); 485 if (!IF_IS_EMPTY(&ipintrq)) { 486 s = splnet(); 487 488 /* Take existing queue onto stack */ 489 lcl_intrq = ipintrq; 490 491 /* Zero out global queue, preserving maxlen and drops */ 492 ipintrq.ifq_head = NULL; 493 ipintrq.ifq_tail = NULL; 494 ipintrq.ifq_len = 0; 495 ipintrq.ifq_maxlen = lcl_intrq.ifq_maxlen; 496 ipintrq.ifq_drops = lcl_intrq.ifq_drops; 497 498 splx(s); 499 } 500 KERNEL_UNLOCK_ONE(NULL); 501 while (!IF_IS_EMPTY(&lcl_intrq)) { 502 IF_DEQUEUE(&lcl_intrq, m); 503 if (m == NULL) 504 break; 505 ip_input(m); 506 } 507 mutex_exit(softnet_lock); 508 } 509 510 /* 511 * Ip input routine. Checksum and byte swap header. If fragmented 512 * try to reassemble. Process options. Pass to next level. 
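 *
 * Rough order of operations below: sanity-check the header (version,
 * header length, checksum, total length), run the pfil hooks, process
 * IP options, decide whether the datagram is ours (unicast, broadcast
 * or multicast) or should be forwarded, reassemble fragments when
 * needed, and finally hand the payload to the matching protocol via
 * ip_protox[] and inetsw[].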
513 */ 514 void 515 ip_input(struct mbuf *m) 516 { 517 struct ip *ip = NULL; 518 struct ipq *fp; 519 struct in_ifaddr *ia; 520 struct ifaddr *ifa; 521 struct ipqent *ipqe; 522 int hlen = 0, mff, len; 523 int downmatch; 524 int checkif; 525 int srcrt = 0; 526 int s; 527 u_int hash; 528 #ifdef FAST_IPSEC 529 struct m_tag *mtag; 530 struct tdb_ident *tdbi; 531 struct secpolicy *sp; 532 int error; 533 #endif /* FAST_IPSEC */ 534 535 MCLAIM(m, &ip_rx_mowner); 536 #ifdef DIAGNOSTIC 537 if ((m->m_flags & M_PKTHDR) == 0) 538 panic("ipintr no HDR"); 539 #endif 540 541 /* 542 * If no IP addresses have been set yet but the interfaces 543 * are receiving, can't do anything with incoming packets yet. 544 */ 545 if (TAILQ_FIRST(&in_ifaddrhead) == 0) 546 goto bad; 547 IP_STATINC(IP_STAT_TOTAL); 548 /* 549 * If the IP header is not aligned, slurp it up into a new 550 * mbuf with space for link headers, in the event we forward 551 * it. Otherwise, if it is aligned, make sure the entire 552 * base IP header is in the first mbuf of the chain. 553 */ 554 if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) { 555 if ((m = m_copyup(m, sizeof(struct ip), 556 (max_linkhdr + 3) & ~3)) == NULL) { 557 /* XXXJRT new stat, please */ 558 IP_STATINC(IP_STAT_TOOSMALL); 559 return; 560 } 561 } else if (__predict_false(m->m_len < sizeof (struct ip))) { 562 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) { 563 IP_STATINC(IP_STAT_TOOSMALL); 564 return; 565 } 566 } 567 ip = mtod(m, struct ip *); 568 if (ip->ip_v != IPVERSION) { 569 IP_STATINC(IP_STAT_BADVERS); 570 goto bad; 571 } 572 hlen = ip->ip_hl << 2; 573 if (hlen < sizeof(struct ip)) { /* minimum header length */ 574 IP_STATINC(IP_STAT_BADHLEN); 575 goto bad; 576 } 577 if (hlen > m->m_len) { 578 if ((m = m_pullup(m, hlen)) == 0) { 579 IP_STATINC(IP_STAT_BADHLEN); 580 return; 581 } 582 ip = mtod(m, struct ip *); 583 } 584 585 /* 586 * RFC1122: packets with a multicast source address are 587 * not allowed. 588 */ 589 if (IN_MULTICAST(ip->ip_src.s_addr)) { 590 IP_STATINC(IP_STAT_BADADDR); 591 goto bad; 592 } 593 594 /* 127/8 must not appear on wire - RFC1122 */ 595 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET || 596 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) { 597 if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) { 598 IP_STATINC(IP_STAT_BADADDR); 599 goto bad; 600 } 601 } 602 603 switch (m->m_pkthdr.csum_flags & 604 ((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) | 605 M_CSUM_IPv4_BAD)) { 606 case M_CSUM_IPv4|M_CSUM_IPv4_BAD: 607 INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); 608 goto badcsum; 609 610 case M_CSUM_IPv4: 611 /* Checksum was okay. */ 612 INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); 613 break; 614 615 default: 616 /* 617 * Must compute it ourselves. Maybe skip checksum on 618 * loopback interfaces. 619 */ 620 if (__predict_true(!(m->m_pkthdr.rcvif->if_flags & 621 IFF_LOOPBACK) || ip_do_loopback_cksum)) { 622 INET_CSUM_COUNTER_INCR(&ip_swcsum); 623 if (in_cksum(m, hlen) != 0) 624 goto badcsum; 625 } 626 break; 627 } 628 629 /* Retrieve the packet length. */ 630 len = ntohs(ip->ip_len); 631 632 /* 633 * Check for additional length bogosity 634 */ 635 if (len < hlen) { 636 IP_STATINC(IP_STAT_BADLEN); 637 goto bad; 638 } 639 640 /* 641 * Check that the amount of data in the buffers 642 * is as at least much as the IP header would have us expect. 643 * Trim mbufs if longer than we expect. 644 * Drop packet if shorter than we expect. 
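 *
 * For instance, a bare TCP ACK has ip_len = 40, but an Ethernet driver
 * typically hands us 46 bytes of payload because of minimum-frame
 * padding; the m_adj() below trims the 6 pad bytes. A chain holding
 * fewer bytes than ip_len claims is counted as IP_STAT_TOOSHORT and
 * dropped.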
645  */
646 	if (m->m_pkthdr.len < len) {
647 		IP_STATINC(IP_STAT_TOOSHORT);
648 		goto bad;
649 	}
650 	if (m->m_pkthdr.len > len) {
651 		if (m->m_len == m->m_pkthdr.len) {
652 			m->m_len = len;
653 			m->m_pkthdr.len = len;
654 		} else
655 			m_adj(m, len - m->m_pkthdr.len);
656 	}
657
658 #if defined(IPSEC)
659 	/* ipflow (IP fast forwarding) is not compatible with IPsec. */
660 	m->m_flags &= ~M_CANFASTFWD;
661 #else
662 	/*
663 	 * Assume that we can create a fast-forward IP flow entry
664 	 * based on this packet.
665 	 */
666 	m->m_flags |= M_CANFASTFWD;
667 #endif
668
669 #ifdef PFIL_HOOKS
670 	/*
671 	 * Run through list of hooks for input packets. If there are any
672 	 * filters which require that additional packets in the flow are
673 	 * not fast-forwarded, they must clear the M_CANFASTFWD flag.
674 	 * Note that filters must _never_ set this flag, as another filter
675 	 * in the list may have previously cleared it.
676 	 */
677 	/*
678 	 * let ipfilter look at packet on the wire,
679 	 * not the decapsulated packet.
680 	 */
681 #ifdef IPSEC
682 	if (!ipsec_getnhist(m))
683 #elif defined(FAST_IPSEC)
684 	if (!ipsec_indone(m))
685 #else
686 	if (1)
687 #endif
688 	{
689 		struct in_addr odst;
690
691 		odst = ip->ip_dst;
692 		if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif,
693 		    PFIL_IN) != 0)
694 			return;
695 		if (m == NULL)
696 			return;
697 		ip = mtod(m, struct ip *);
698 		hlen = ip->ip_hl << 2;
699 		/*
700 		 * XXX The setting of "srcrt" here is to prevent ip_forward()
701 		 * from generating ICMP redirects for packets that have
702 		 * been redirected by a hook back out on to the same LAN that
703 		 * they came from and is not an indication that the packet
704 		 * is being influenced by source routing options. This
705 		 * allows things like
706 		 * "rdr tlp0 0/0 port 80 -> 1.1.1.200 3128 tcp"
707 		 * where tlp0 is both on the 1.1.1.0/24 network and is the
708 		 * default route for hosts on 1.1.1.0/24. Of course this
709 		 * also requires a "map tlp0 ..." to complete the story.
710 		 * One might argue whether or not this kind of network config.
711 		 * should be supported in this manner...
712 		 */
713 		srcrt = (odst.s_addr != ip->ip_dst.s_addr);
714 	}
715 #endif /* PFIL_HOOKS */
716
717 #ifdef ALTQ
718 	/* XXX Temporary until ALTQ is changed to use a pfil hook */
719 	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) {
720 		/* packet dropped by traffic conditioner */
721 		return;
722 	}
723 #endif
724
725 	/*
726 	 * Process options and, if not destined for us,
727 	 * ship it on. ip_dooptions returns 1 when an
728 	 * error was detected (causing an icmp message
729 	 * to be sent and the original packet to be freed).
730 	 */
731 	ip_nhops = 0;		/* for source routed packets */
732 	if (hlen > sizeof (struct ip) && ip_dooptions(m))
733 		return;
734
735 	/*
736 	 * Enable a consistency check between the destination address
737 	 * and the arrival interface for a unicast packet (the RFC 1122
738 	 * strong ES model) if IP forwarding is disabled and the packet
739 	 * is not locally generated.
740 	 *
741 	 * XXX - Checking also should be disabled if the destination
742 	 * address is ipnat'ed to a different interface.
743 	 *
744 	 * XXX - Checking is incompatible with IP aliases added
745 	 * to the loopback interface instead of the interface where
746 	 * the packets are received.
747 	 *
748 	 * XXX - We need to add a per ifaddr flag for this so that
749 	 * we get finer grain control.
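 *
 * Example of the effect (assuming checkif ends up set): a unicast
 * datagram for an address configured on interface A but received on
 * interface B no longer matches in the loop below and is treated as
 * "not ours"; with forwarding off it is then counted as
 * IP_STAT_CANTFORWARD and dropped.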
750 */ 751 checkif = ip_checkinterface && (ipforwarding == 0) && 752 (m->m_pkthdr.rcvif != NULL) && 753 ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0); 754 755 /* 756 * Check our list of addresses, to see if the packet is for us. 757 * 758 * Traditional 4.4BSD did not consult IFF_UP at all. 759 * The behavior here is to treat addresses on !IFF_UP interface 760 * as not mine. 761 */ 762 downmatch = 0; 763 LIST_FOREACH(ia, &IN_IFADDR_HASH(ip->ip_dst.s_addr), ia_hash) { 764 if (in_hosteq(ia->ia_addr.sin_addr, ip->ip_dst)) { 765 if (checkif && ia->ia_ifp != m->m_pkthdr.rcvif) 766 continue; 767 if ((ia->ia_ifp->if_flags & IFF_UP) != 0) 768 break; 769 else 770 downmatch++; 771 } 772 } 773 if (ia != NULL) 774 goto ours; 775 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) { 776 IFADDR_FOREACH(ifa, m->m_pkthdr.rcvif) { 777 if (ifa->ifa_addr->sa_family != AF_INET) 778 continue; 779 ia = ifatoia(ifa); 780 if (in_hosteq(ip->ip_dst, ia->ia_broadaddr.sin_addr) || 781 in_hosteq(ip->ip_dst, ia->ia_netbroadcast) || 782 /* 783 * Look for all-0's host part (old broadcast addr), 784 * either for subnet or net. 785 */ 786 ip->ip_dst.s_addr == ia->ia_subnet || 787 ip->ip_dst.s_addr == ia->ia_net) 788 goto ours; 789 /* 790 * An interface with IP address zero accepts 791 * all packets that arrive on that interface. 792 */ 793 if (in_nullhost(ia->ia_addr.sin_addr)) 794 goto ours; 795 } 796 } 797 if (IN_MULTICAST(ip->ip_dst.s_addr)) { 798 struct in_multi *inm; 799 #ifdef MROUTING 800 extern struct socket *ip_mrouter; 801 802 if (ip_mrouter) { 803 /* 804 * If we are acting as a multicast router, all 805 * incoming multicast packets are passed to the 806 * kernel-level multicast forwarding function. 807 * The packet is returned (relatively) intact; if 808 * ip_mforward() returns a non-zero value, the packet 809 * must be discarded, else it may be accepted below. 810 * 811 * (The IP ident field is put in the same byte order 812 * as expected when ip_mforward() is called from 813 * ip_output().) 814 */ 815 if (ip_mforward(m, m->m_pkthdr.rcvif) != 0) { 816 IP_STATINC(IP_STAT_CANTFORWARD); 817 m_freem(m); 818 return; 819 } 820 821 /* 822 * The process-level routing demon needs to receive 823 * all multicast IGMP packets, whether or not this 824 * host belongs to their destination groups. 825 */ 826 if (ip->ip_p == IPPROTO_IGMP) 827 goto ours; 828 IP_STATINC(IP_STAT_CANTFORWARD); 829 } 830 #endif 831 /* 832 * See if we belong to the destination multicast group on the 833 * arrival interface. 834 */ 835 IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm); 836 if (inm == NULL) { 837 IP_STATINC(IP_STAT_CANTFORWARD); 838 m_freem(m); 839 return; 840 } 841 goto ours; 842 } 843 if (ip->ip_dst.s_addr == INADDR_BROADCAST || 844 in_nullhost(ip->ip_dst)) 845 goto ours; 846 847 /* 848 * Not for us; forward if possible and desirable. 849 */ 850 if (ipforwarding == 0) { 851 IP_STATINC(IP_STAT_CANTFORWARD); 852 m_freem(m); 853 } else { 854 /* 855 * If ip_dst matched any of my address on !IFF_UP interface, 856 * and there's no IFF_UP interface that matches ip_dst, 857 * send icmp unreach. Forwarding it will result in in-kernel 858 * forwarding loop till TTL goes to 0. 
859 */ 860 if (downmatch) { 861 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0); 862 IP_STATINC(IP_STAT_CANTFORWARD); 863 return; 864 } 865 #ifdef IPSEC 866 if (ipsec4_in_reject(m, NULL)) { 867 IPSEC_STATINC(IPSEC_STAT_IN_POLVIO); 868 goto bad; 869 } 870 #endif 871 #ifdef FAST_IPSEC 872 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); 873 s = splsoftnet(); 874 if (mtag != NULL) { 875 tdbi = (struct tdb_ident *)(mtag + 1); 876 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND); 877 } else { 878 sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 879 IP_FORWARDING, &error); 880 } 881 if (sp == NULL) { /* NB: can happen if error */ 882 splx(s); 883 /*XXX error stat???*/ 884 DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/ 885 goto bad; 886 } 887 888 /* 889 * Check security policy against packet attributes. 890 */ 891 error = ipsec_in_reject(sp, m); 892 KEY_FREESP(&sp); 893 splx(s); 894 if (error) { 895 IP_STATINC(IP_STAT_CANTFORWARD); 896 goto bad; 897 } 898 899 /* 900 * Peek at the outbound SP for this packet to determine if 901 * it's a Fast Forward candidate. 902 */ 903 mtag = m_tag_find(m, PACKET_TAG_IPSEC_PENDING_TDB, NULL); 904 if (mtag != NULL) 905 m->m_flags &= ~M_CANFASTFWD; 906 else { 907 s = splsoftnet(); 908 sp = ipsec4_checkpolicy(m, IPSEC_DIR_OUTBOUND, 909 (IP_FORWARDING | 910 (ip_directedbcast ? IP_ALLOWBROADCAST : 0)), 911 &error, NULL); 912 if (sp != NULL) { 913 m->m_flags &= ~M_CANFASTFWD; 914 KEY_FREESP(&sp); 915 } 916 splx(s); 917 } 918 #endif /* FAST_IPSEC */ 919 920 ip_forward(m, srcrt); 921 } 922 return; 923 924 ours: 925 /* 926 * If offset or IP_MF are set, must reassemble. 927 * Otherwise, nothing need be done. 928 * (We could look in the reassembly queue to see 929 * if the packet was previously fragmented, 930 * but it's not worth the time; just let them time out.) 931 */ 932 if (ip->ip_off & ~htons(IP_DF|IP_RF)) { 933 uint16_t off; 934 /* 935 * Prevent TCP blind data attacks by not allowing non-initial 936 * fragments to start at less than 68 bytes (minimal fragment 937 * size) and making sure the first fragment is at least 68 938 * bytes. 939 */ 940 off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3; 941 if ((off > 0 ? off + hlen : len) < IP_MINFRAGSIZE - 1) { 942 IP_STATINC(IP_STAT_BADFRAGS); 943 goto bad; 944 } 945 /* 946 * Look for queue of fragments 947 * of this datagram. 948 */ 949 IPQ_LOCK(); 950 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id); 951 LIST_FOREACH(fp, &ipq[hash], ipq_q) { 952 if (ip->ip_id == fp->ipq_id && 953 in_hosteq(ip->ip_src, fp->ipq_src) && 954 in_hosteq(ip->ip_dst, fp->ipq_dst) && 955 ip->ip_p == fp->ipq_p) { 956 /* 957 * Make sure the TOS is matches previous 958 * fragments. 959 */ 960 if (ip->ip_tos != fp->ipq_tos) { 961 IP_STATINC(IP_STAT_BADFRAGS); 962 IPQ_UNLOCK(); 963 goto bad; 964 } 965 goto found; 966 } 967 } 968 fp = 0; 969 found: 970 971 /* 972 * Adjust ip_len to not reflect header, 973 * set ipqe_mff if more fragments are expected, 974 * convert offset of this to bytes. 975 */ 976 ip->ip_len = htons(ntohs(ip->ip_len) - hlen); 977 mff = (ip->ip_off & htons(IP_MF)) != 0; 978 if (mff) { 979 /* 980 * Make sure that fragments have a data length 981 * that's a non-zero multiple of 8 bytes. 
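 *
 * Worked example (figures purely illustrative): a non-final fragment
 * whose payload works out to 100 bytes is rejected below, since
 * 100 & 0x7 == 4; fragment offsets are expressed in units of 8 bytes,
 * so only the final fragment may carry a payload that is not a
 * multiple of 8.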
982 */ 983 if (ntohs(ip->ip_len) == 0 || 984 (ntohs(ip->ip_len) & 0x7) != 0) { 985 IP_STATINC(IP_STAT_BADFRAGS); 986 IPQ_UNLOCK(); 987 goto bad; 988 } 989 } 990 ip->ip_off = htons((ntohs(ip->ip_off) & IP_OFFMASK) << 3); 991 992 /* 993 * If datagram marked as having more fragments 994 * or if this is not the first fragment, 995 * attempt reassembly; if it succeeds, proceed. 996 */ 997 if (mff || ip->ip_off != htons(0)) { 998 IP_STATINC(IP_STAT_FRAGMENTS); 999 s = splvm(); 1000 ipqe = pool_get(&ipqent_pool, PR_NOWAIT); 1001 splx(s); 1002 if (ipqe == NULL) { 1003 IP_STATINC(IP_STAT_RCVMEMDROP); 1004 IPQ_UNLOCK(); 1005 goto bad; 1006 } 1007 ipqe->ipqe_mff = mff; 1008 ipqe->ipqe_m = m; 1009 ipqe->ipqe_ip = ip; 1010 m = ip_reass(ipqe, fp, &ipq[hash]); 1011 if (m == 0) { 1012 IPQ_UNLOCK(); 1013 return; 1014 } 1015 IP_STATINC(IP_STAT_REASSEMBLED); 1016 ip = mtod(m, struct ip *); 1017 hlen = ip->ip_hl << 2; 1018 ip->ip_len = htons(ntohs(ip->ip_len) + hlen); 1019 } else 1020 if (fp) 1021 ip_freef(fp); 1022 IPQ_UNLOCK(); 1023 } 1024 1025 #if defined(IPSEC) 1026 /* 1027 * enforce IPsec policy checking if we are seeing last header. 1028 * note that we do not visit this with protocols with pcb layer 1029 * code - like udp/tcp/raw ip. 1030 */ 1031 if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0 && 1032 ipsec4_in_reject(m, NULL)) { 1033 IPSEC_STATINC(IPSEC_STAT_IN_POLVIO); 1034 goto bad; 1035 } 1036 #endif 1037 #ifdef FAST_IPSEC 1038 /* 1039 * enforce IPsec policy checking if we are seeing last header. 1040 * note that we do not visit this with protocols with pcb layer 1041 * code - like udp/tcp/raw ip. 1042 */ 1043 if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0) { 1044 /* 1045 * Check if the packet has already had IPsec processing 1046 * done. If so, then just pass it along. This tag gets 1047 * set during AH, ESP, etc. input handling, before the 1048 * packet is returned to the ip input queue for delivery. 1049 */ 1050 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); 1051 s = splsoftnet(); 1052 if (mtag != NULL) { 1053 tdbi = (struct tdb_ident *)(mtag + 1); 1054 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND); 1055 } else { 1056 sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 1057 IP_FORWARDING, &error); 1058 } 1059 if (sp != NULL) { 1060 /* 1061 * Check security policy against packet attributes. 1062 */ 1063 error = ipsec_in_reject(sp, m); 1064 KEY_FREESP(&sp); 1065 } else { 1066 /* XXX error stat??? */ 1067 error = EINVAL; 1068 DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/ 1069 } 1070 splx(s); 1071 if (error) 1072 goto bad; 1073 } 1074 #endif /* FAST_IPSEC */ 1075 1076 /* 1077 * Switch out to protocol's input routine. 1078 */ 1079 #if IFA_STATS 1080 if (ia && ip) 1081 ia->ia_ifa.ifa_data.ifad_inbytes += ntohs(ip->ip_len); 1082 #endif 1083 IP_STATINC(IP_STAT_DELIVERED); 1084 { 1085 int off = hlen, nh = ip->ip_p; 1086 1087 (*inetsw[ip_protox[nh]].pr_input)(m, off, nh); 1088 return; 1089 } 1090 bad: 1091 m_freem(m); 1092 return; 1093 1094 badcsum: 1095 IP_STATINC(IP_STAT_BADSUM); 1096 m_freem(m); 1097 } 1098 1099 /* 1100 * Take incoming datagram fragment and try to 1101 * reassemble it into whole datagram. If a chain for 1102 * reassembly of this datagram already exists, then it 1103 * is given as fp; otherwise have to make a chain. 
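 *
 * Fragments are matched to a chain by the usual reassembly 4-tuple of
 * source address, destination address, protocol and IP id; the lookup
 * that produces "fp" is done in ip_input() above, in the hash bucket
 * selected by IPREASS_HASH(), under IPQ_LOCK.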
1104 */ 1105 struct mbuf * 1106 ip_reass(struct ipqent *ipqe, struct ipq *fp, struct ipqhead *ipqhead) 1107 { 1108 struct mbuf *m = ipqe->ipqe_m; 1109 struct ipqent *nq, *p, *q; 1110 struct ip *ip; 1111 struct mbuf *t; 1112 int hlen = ipqe->ipqe_ip->ip_hl << 2; 1113 int i, next, s; 1114 1115 IPQ_LOCK_CHECK(); 1116 1117 /* 1118 * Presence of header sizes in mbufs 1119 * would confuse code below. 1120 */ 1121 m->m_data += hlen; 1122 m->m_len -= hlen; 1123 1124 #ifdef notyet 1125 /* make sure fragment limit is up-to-date */ 1126 CHECK_NMBCLUSTER_PARAMS(); 1127 1128 /* If we have too many fragments, drop the older half. */ 1129 if (ip_nfrags >= ip_maxfrags) 1130 ip_reass_drophalf(void); 1131 #endif 1132 1133 /* 1134 * We are about to add a fragment; increment frag count. 1135 */ 1136 ip_nfrags++; 1137 1138 /* 1139 * If first fragment to arrive, create a reassembly queue. 1140 */ 1141 if (fp == 0) { 1142 /* 1143 * Enforce upper bound on number of fragmented packets 1144 * for which we attempt reassembly; 1145 * If maxfrag is 0, never accept fragments. 1146 * If maxfrag is -1, accept all fragments without limitation. 1147 */ 1148 if (ip_maxfragpackets < 0) 1149 ; 1150 else if (ip_nfragpackets >= ip_maxfragpackets) 1151 goto dropfrag; 1152 ip_nfragpackets++; 1153 fp = malloc(sizeof (struct ipq), M_FTABLE, M_NOWAIT); 1154 if (fp == NULL) 1155 goto dropfrag; 1156 LIST_INSERT_HEAD(ipqhead, fp, ipq_q); 1157 fp->ipq_nfrags = 1; 1158 fp->ipq_ttl = IPFRAGTTL; 1159 fp->ipq_p = ipqe->ipqe_ip->ip_p; 1160 fp->ipq_id = ipqe->ipqe_ip->ip_id; 1161 fp->ipq_tos = ipqe->ipqe_ip->ip_tos; 1162 TAILQ_INIT(&fp->ipq_fragq); 1163 fp->ipq_src = ipqe->ipqe_ip->ip_src; 1164 fp->ipq_dst = ipqe->ipqe_ip->ip_dst; 1165 p = NULL; 1166 goto insert; 1167 } else { 1168 fp->ipq_nfrags++; 1169 } 1170 1171 /* 1172 * Find a segment which begins after this one does. 1173 */ 1174 for (p = NULL, q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL; 1175 p = q, q = TAILQ_NEXT(q, ipqe_q)) 1176 if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off)) 1177 break; 1178 1179 /* 1180 * If there is a preceding segment, it may provide some of 1181 * our data already. If so, drop the data from the incoming 1182 * segment. If it provides all of our data, drop us. 1183 */ 1184 if (p != NULL) { 1185 i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) - 1186 ntohs(ipqe->ipqe_ip->ip_off); 1187 if (i > 0) { 1188 if (i >= ntohs(ipqe->ipqe_ip->ip_len)) 1189 goto dropfrag; 1190 m_adj(ipqe->ipqe_m, i); 1191 ipqe->ipqe_ip->ip_off = 1192 htons(ntohs(ipqe->ipqe_ip->ip_off) + i); 1193 ipqe->ipqe_ip->ip_len = 1194 htons(ntohs(ipqe->ipqe_ip->ip_len) - i); 1195 } 1196 } 1197 1198 /* 1199 * While we overlap succeeding segments trim them or, 1200 * if they are completely covered, dequeue them. 
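 *
 * Sketch with illustrative byte ranges: if the new fragment covers
 * [0,128) and an existing one covers [64,192), the existing fragment
 * is trimmed by 64 bytes so that it starts at offset 128; an existing
 * fragment covering only [64,96) is instead dequeued and freed, since
 * the newcomer supersedes it entirely.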
1201 */ 1202 for (; q != NULL && 1203 ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) > 1204 ntohs(q->ipqe_ip->ip_off); q = nq) { 1205 i = (ntohs(ipqe->ipqe_ip->ip_off) + 1206 ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off); 1207 if (i < ntohs(q->ipqe_ip->ip_len)) { 1208 q->ipqe_ip->ip_len = 1209 htons(ntohs(q->ipqe_ip->ip_len) - i); 1210 q->ipqe_ip->ip_off = 1211 htons(ntohs(q->ipqe_ip->ip_off) + i); 1212 m_adj(q->ipqe_m, i); 1213 break; 1214 } 1215 nq = TAILQ_NEXT(q, ipqe_q); 1216 m_freem(q->ipqe_m); 1217 TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q); 1218 s = splvm(); 1219 pool_put(&ipqent_pool, q); 1220 splx(s); 1221 fp->ipq_nfrags--; 1222 ip_nfrags--; 1223 } 1224 1225 insert: 1226 /* 1227 * Stick new segment in its place; 1228 * check for complete reassembly. 1229 */ 1230 if (p == NULL) { 1231 TAILQ_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q); 1232 } else { 1233 TAILQ_INSERT_AFTER(&fp->ipq_fragq, p, ipqe, ipqe_q); 1234 } 1235 next = 0; 1236 for (p = NULL, q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL; 1237 p = q, q = TAILQ_NEXT(q, ipqe_q)) { 1238 if (ntohs(q->ipqe_ip->ip_off) != next) 1239 return (0); 1240 next += ntohs(q->ipqe_ip->ip_len); 1241 } 1242 if (p->ipqe_mff) 1243 return (0); 1244 1245 /* 1246 * Reassembly is complete. Check for a bogus message size and 1247 * concatenate fragments. 1248 */ 1249 q = TAILQ_FIRST(&fp->ipq_fragq); 1250 ip = q->ipqe_ip; 1251 if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) { 1252 IP_STATINC(IP_STAT_TOOLONG); 1253 ip_freef(fp); 1254 return (0); 1255 } 1256 m = q->ipqe_m; 1257 t = m->m_next; 1258 m->m_next = 0; 1259 m_cat(m, t); 1260 nq = TAILQ_NEXT(q, ipqe_q); 1261 s = splvm(); 1262 pool_put(&ipqent_pool, q); 1263 splx(s); 1264 for (q = nq; q != NULL; q = nq) { 1265 t = q->ipqe_m; 1266 nq = TAILQ_NEXT(q, ipqe_q); 1267 s = splvm(); 1268 pool_put(&ipqent_pool, q); 1269 splx(s); 1270 m_cat(m, t); 1271 } 1272 ip_nfrags -= fp->ipq_nfrags; 1273 1274 /* 1275 * Create header for new ip packet by 1276 * modifying header of first packet; 1277 * dequeue and discard fragment reassembly header. 1278 * Make header visible. 1279 */ 1280 ip->ip_len = htons(next); 1281 ip->ip_src = fp->ipq_src; 1282 ip->ip_dst = fp->ipq_dst; 1283 LIST_REMOVE(fp, ipq_q); 1284 free(fp, M_FTABLE); 1285 ip_nfragpackets--; 1286 m->m_len += (ip->ip_hl << 2); 1287 m->m_data -= (ip->ip_hl << 2); 1288 /* some debugging cruft by sklower, below, will go away soon */ 1289 if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */ 1290 int plen = 0; 1291 for (t = m; t; t = t->m_next) 1292 plen += t->m_len; 1293 m->m_pkthdr.len = plen; 1294 m->m_pkthdr.csum_flags = 0; 1295 } 1296 return (m); 1297 1298 dropfrag: 1299 if (fp != 0) 1300 fp->ipq_nfrags--; 1301 ip_nfrags--; 1302 IP_STATINC(IP_STAT_FRAGDROPPED); 1303 m_freem(m); 1304 s = splvm(); 1305 pool_put(&ipqent_pool, ipqe); 1306 splx(s); 1307 return (0); 1308 } 1309 1310 /* 1311 * Free a fragment reassembly header and all 1312 * associated datagrams. 
1313  */
1314 void
1315 ip_freef(struct ipq *fp)
1316 {
1317 	struct ipqent *q, *p;
1318 	u_int nfrags = 0;
1319 	int s;
1320
1321 	IPQ_LOCK_CHECK();
1322
1323 	for (q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL; q = p) {
1324 		p = TAILQ_NEXT(q, ipqe_q);
1325 		m_freem(q->ipqe_m);
1326 		nfrags++;
1327 		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
1328 		s = splvm();
1329 		pool_put(&ipqent_pool, q);
1330 		splx(s);
1331 	}
1332
1333 	if (nfrags != fp->ipq_nfrags)
1334 		printf("ip_freef: nfrags %d != %d\n", fp->ipq_nfrags, nfrags);
1335 	ip_nfrags -= nfrags;
1336 	LIST_REMOVE(fp, ipq_q);
1337 	free(fp, M_FTABLE);
1338 	ip_nfragpackets--;
1339 }
1340
1341 /*
1342  * IP reassembly TTL machinery for multiplicative drop.
1343  */
1344 static u_int fragttl_histo[(IPFRAGTTL+1)];
1345
1346
1347 /*
1348  * Decrement TTL of all reassembly queue entries by `ticks'.
1349  * Count number of distinct fragments (as opposed to partial, fragmented
1350  * datagrams) in the reassembly queue. While we traverse the entire
1351  * reassembly queue, compute and return the median TTL over all fragments.
1352  */
1353 static u_int
1354 ip_reass_ttl_decr(u_int ticks)
1355 {
1356 	u_int nfrags, median, dropfraction, keepfraction;
1357 	struct ipq *fp, *nfp;
1358 	int i;
1359
1360 	nfrags = 0;
1361 	memset(fragttl_histo, 0, sizeof fragttl_histo);
1362
1363 	for (i = 0; i < IPREASS_NHASH; i++) {
1364 		for (fp = LIST_FIRST(&ipq[i]); fp != NULL; fp = nfp) {
1365 			fp->ipq_ttl = ((fp->ipq_ttl <= ticks) ?
1366 			    0 : fp->ipq_ttl - ticks);
1367 			nfp = LIST_NEXT(fp, ipq_q);
1368 			if (fp->ipq_ttl == 0) {
1369 				IP_STATINC(IP_STAT_FRAGTIMEOUT);
1370 				ip_freef(fp);
1371 			} else {
1372 				nfrags += fp->ipq_nfrags;
1373 				fragttl_histo[fp->ipq_ttl] += fp->ipq_nfrags;
1374 			}
1375 		}
1376 	}
1377
1378 	KASSERT(ip_nfrags == nfrags);
1379
1380 	/* Find median (or other drop fraction) in histogram. */
1381 	dropfraction = (ip_nfrags / 2);
1382 	keepfraction = ip_nfrags - dropfraction;
1383 	for (i = IPFRAGTTL, median = 0; i >= 0; i--) {
1384 		median += fragttl_histo[i];
1385 		if (median >= keepfraction)
1386 			break;
1387 	}
1388
1389 	/* Return TTL of median (or other fraction). */
1390 	return (u_int)i;
1391 }
1392
1393 void
1394 ip_reass_drophalf(void)
1395 {
1396
1397 	u_int median_ticks;
1398 	/*
1399 	 * Compute median TTL of all fragments, and count frags
1400 	 * with that TTL or lower (roughly half of all fragments).
1401 	 */
1402 	median_ticks = ip_reass_ttl_decr(0);
1403
1404 	/* Drop half. */
1405 	median_ticks = ip_reass_ttl_decr(median_ticks);
1406
1407 }
1408
1409 /*
1410  * IP timer processing;
1411  * if a timer expires on a reassembly
1412  * queue, discard it.
1413  */
1414 void
1415 ip_slowtimo(void)
1416 {
1417 	static u_int dropscanidx = 0;
1418 	u_int i;
1419 	u_int median_ttl;
1420
1421 	mutex_enter(softnet_lock);
1422 	KERNEL_LOCK(1, NULL);
1423
1424 	IPQ_LOCK();
1425
1426 	/* Age TTL of all fragments by 1 tick. */
1427 	median_ttl = ip_reass_ttl_decr(1);
1428
1429 	/* make sure fragment limit is up-to-date */
1430 	CHECK_NMBCLUSTER_PARAMS();
1431
1432 	/* If we have too many fragments, drop the older half. */
1433 	if (ip_nfrags > ip_maxfrags)
1434 		ip_reass_ttl_decr(median_ttl);
1435
1436 	/*
1437 	 * If we are over the maximum number of fragmented packets
1438 	 * (due to the limit being lowered), drain off
1439 	 * enough to get down to the new limit. Start draining
1440 	 * from the reassembly hashqueue most recently drained.
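 *
 * For example, if ip_maxfragpackets were lowered from its default of
 * 200 to, say, 50 while 120 partial datagrams are queued, the loop
 * below frees whole reassembly queues bucket by bucket, starting at
 * dropscanidx, until no more than 50 remain or the entire hash table
 * has been scanned once.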
1441 */ 1442 if (ip_maxfragpackets < 0) 1443 ; 1444 else { 1445 int wrapped = 0; 1446 1447 i = dropscanidx; 1448 while (ip_nfragpackets > ip_maxfragpackets && wrapped == 0) { 1449 while (LIST_FIRST(&ipq[i]) != NULL) 1450 ip_freef(LIST_FIRST(&ipq[i])); 1451 if (++i >= IPREASS_NHASH) { 1452 i = 0; 1453 } 1454 /* 1455 * Dont scan forever even if fragment counters are 1456 * wrong: stop after scanning entire reassembly queue. 1457 */ 1458 if (i == dropscanidx) 1459 wrapped = 1; 1460 } 1461 dropscanidx = i; 1462 } 1463 IPQ_UNLOCK(); 1464 1465 KERNEL_UNLOCK_ONE(NULL); 1466 mutex_exit(softnet_lock); 1467 } 1468 1469 /* 1470 * Drain off all datagram fragments. Don't acquire softnet_lock as 1471 * can be called from hardware interrupt context. 1472 */ 1473 void 1474 ip_drain(void) 1475 { 1476 1477 KERNEL_LOCK(1, NULL); 1478 1479 /* 1480 * We may be called from a device's interrupt context. If 1481 * the ipq is already busy, just bail out now. 1482 */ 1483 if (ipq_lock_try() != 0) { 1484 /* 1485 * Drop half the total fragments now. If more mbufs are 1486 * needed, we will be called again soon. 1487 */ 1488 ip_reass_drophalf(); 1489 IPQ_UNLOCK(); 1490 } 1491 1492 KERNEL_UNLOCK_ONE(NULL); 1493 } 1494 1495 /* 1496 * Do option processing on a datagram, 1497 * possibly discarding it if bad options are encountered, 1498 * or forwarding it if source-routed. 1499 * Returns 1 if packet has been forwarded/freed, 1500 * 0 if the packet should be processed further. 1501 */ 1502 int 1503 ip_dooptions(struct mbuf *m) 1504 { 1505 struct ip *ip = mtod(m, struct ip *); 1506 u_char *cp, *cp0; 1507 struct ip_timestamp *ipt; 1508 struct in_ifaddr *ia; 1509 int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; 1510 struct in_addr dst; 1511 n_time ntime; 1512 1513 dst = ip->ip_dst; 1514 cp = (u_char *)(ip + 1); 1515 cnt = (ip->ip_hl << 2) - sizeof (struct ip); 1516 for (; cnt > 0; cnt -= optlen, cp += optlen) { 1517 opt = cp[IPOPT_OPTVAL]; 1518 if (opt == IPOPT_EOL) 1519 break; 1520 if (opt == IPOPT_NOP) 1521 optlen = 1; 1522 else { 1523 if (cnt < IPOPT_OLEN + sizeof(*cp)) { 1524 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1525 goto bad; 1526 } 1527 optlen = cp[IPOPT_OLEN]; 1528 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { 1529 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1530 goto bad; 1531 } 1532 } 1533 switch (opt) { 1534 1535 default: 1536 break; 1537 1538 /* 1539 * Source routing with record. 1540 * Find interface with current destination address. 1541 * If none on this machine then drop if strictly routed, 1542 * or do nothing if loosely routed. 1543 * Record interface address and bring up next address 1544 * component. If strictly routed make sure next 1545 * address is on directly accessible net. 1546 */ 1547 case IPOPT_LSRR: 1548 case IPOPT_SSRR: 1549 if (ip_allowsrcrt == 0) { 1550 type = ICMP_UNREACH; 1551 code = ICMP_UNREACH_NET_PROHIB; 1552 goto bad; 1553 } 1554 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1555 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1556 goto bad; 1557 } 1558 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1559 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1560 goto bad; 1561 } 1562 ipaddr.sin_addr = ip->ip_dst; 1563 ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr))); 1564 if (ia == 0) { 1565 if (opt == IPOPT_SSRR) { 1566 type = ICMP_UNREACH; 1567 code = ICMP_UNREACH_SRCFAIL; 1568 goto bad; 1569 } 1570 /* 1571 * Loose routing, and not at next destination 1572 * yet; nothing to do except forward. 
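 *
 * (For a strict source route, IPOPT_SSRR, the same "not one of our
 * addresses" case was already answered above with
 * ICMP_UNREACH_SRCFAIL instead of falling through to here.)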
1573 */ 1574 break; 1575 } 1576 off--; /* 0 origin */ 1577 if ((off + sizeof(struct in_addr)) > optlen) { 1578 /* 1579 * End of source route. Should be for us. 1580 */ 1581 save_rte(cp, ip->ip_src); 1582 break; 1583 } 1584 /* 1585 * locate outgoing interface 1586 */ 1587 memcpy((void *)&ipaddr.sin_addr, (void *)(cp + off), 1588 sizeof(ipaddr.sin_addr)); 1589 if (opt == IPOPT_SSRR) 1590 ia = ifatoia(ifa_ifwithladdr(sintosa(&ipaddr))); 1591 else 1592 ia = ip_rtaddr(ipaddr.sin_addr); 1593 if (ia == 0) { 1594 type = ICMP_UNREACH; 1595 code = ICMP_UNREACH_SRCFAIL; 1596 goto bad; 1597 } 1598 ip->ip_dst = ipaddr.sin_addr; 1599 bcopy((void *)&ia->ia_addr.sin_addr, 1600 (void *)(cp + off), sizeof(struct in_addr)); 1601 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1602 /* 1603 * Let ip_intr's mcast routing check handle mcast pkts 1604 */ 1605 forward = !IN_MULTICAST(ip->ip_dst.s_addr); 1606 break; 1607 1608 case IPOPT_RR: 1609 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1610 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1611 goto bad; 1612 } 1613 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1614 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1615 goto bad; 1616 } 1617 /* 1618 * If no space remains, ignore. 1619 */ 1620 off--; /* 0 origin */ 1621 if ((off + sizeof(struct in_addr)) > optlen) 1622 break; 1623 memcpy((void *)&ipaddr.sin_addr, (void *)(&ip->ip_dst), 1624 sizeof(ipaddr.sin_addr)); 1625 /* 1626 * locate outgoing interface; if we're the destination, 1627 * use the incoming interface (should be same). 1628 */ 1629 if ((ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr)))) 1630 == NULL && 1631 (ia = ip_rtaddr(ipaddr.sin_addr)) == NULL) { 1632 type = ICMP_UNREACH; 1633 code = ICMP_UNREACH_HOST; 1634 goto bad; 1635 } 1636 bcopy((void *)&ia->ia_addr.sin_addr, 1637 (void *)(cp + off), sizeof(struct in_addr)); 1638 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1639 break; 1640 1641 case IPOPT_TS: 1642 code = cp - (u_char *)ip; 1643 ipt = (struct ip_timestamp *)cp; 1644 if (ipt->ipt_len < 4 || ipt->ipt_len > 40) { 1645 code = (u_char *)&ipt->ipt_len - (u_char *)ip; 1646 goto bad; 1647 } 1648 if (ipt->ipt_ptr < 5) { 1649 code = (u_char *)&ipt->ipt_ptr - (u_char *)ip; 1650 goto bad; 1651 } 1652 if (ipt->ipt_ptr > ipt->ipt_len - sizeof (int32_t)) { 1653 if (++ipt->ipt_oflw == 0) { 1654 code = (u_char *)&ipt->ipt_ptr - 1655 (u_char *)ip; 1656 goto bad; 1657 } 1658 break; 1659 } 1660 cp0 = (cp + ipt->ipt_ptr - 1); 1661 switch (ipt->ipt_flg) { 1662 1663 case IPOPT_TS_TSONLY: 1664 break; 1665 1666 case IPOPT_TS_TSANDADDR: 1667 if (ipt->ipt_ptr - 1 + sizeof(n_time) + 1668 sizeof(struct in_addr) > ipt->ipt_len) { 1669 code = (u_char *)&ipt->ipt_ptr - 1670 (u_char *)ip; 1671 goto bad; 1672 } 1673 ipaddr.sin_addr = dst; 1674 ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr), 1675 m->m_pkthdr.rcvif)); 1676 if (ia == 0) 1677 continue; 1678 bcopy(&ia->ia_addr.sin_addr, 1679 cp0, sizeof(struct in_addr)); 1680 ipt->ipt_ptr += sizeof(struct in_addr); 1681 break; 1682 1683 case IPOPT_TS_PRESPEC: 1684 if (ipt->ipt_ptr - 1 + sizeof(n_time) + 1685 sizeof(struct in_addr) > ipt->ipt_len) { 1686 code = (u_char *)&ipt->ipt_ptr - 1687 (u_char *)ip; 1688 goto bad; 1689 } 1690 memcpy(&ipaddr.sin_addr, cp0, 1691 sizeof(struct in_addr)); 1692 if (ifatoia(ifa_ifwithaddr(sintosa(&ipaddr))) 1693 == NULL) 1694 continue; 1695 ipt->ipt_ptr += sizeof(struct in_addr); 1696 break; 1697 1698 default: 1699 /* XXX can't take &ipt->ipt_flg */ 1700 code = (u_char *)&ipt->ipt_ptr - 1701 (u_char *)ip + 1; 1702 goto bad; 1703 } 1704 ntime = iptime(); 1705 cp0 = 
(u_char *) &ntime; /* XXX grumble, GCC... */ 1706 memmove((char *)cp + ipt->ipt_ptr - 1, cp0, 1707 sizeof(n_time)); 1708 ipt->ipt_ptr += sizeof(n_time); 1709 } 1710 } 1711 if (forward) { 1712 if (ip_forwsrcrt == 0) { 1713 type = ICMP_UNREACH; 1714 code = ICMP_UNREACH_SRCFAIL; 1715 goto bad; 1716 } 1717 ip_forward(m, 1); 1718 return (1); 1719 } 1720 return (0); 1721 bad: 1722 icmp_error(m, type, code, 0, 0); 1723 IP_STATINC(IP_STAT_BADOPTIONS); 1724 return (1); 1725 } 1726 1727 /* 1728 * Given address of next destination (final or next hop), 1729 * return internet address info of interface to be used to get there. 1730 */ 1731 struct in_ifaddr * 1732 ip_rtaddr(struct in_addr dst) 1733 { 1734 struct rtentry *rt; 1735 union { 1736 struct sockaddr dst; 1737 struct sockaddr_in dst4; 1738 } u; 1739 1740 sockaddr_in_init(&u.dst4, &dst, 0); 1741 1742 if ((rt = rtcache_lookup(&ipforward_rt, &u.dst)) == NULL) 1743 return NULL; 1744 1745 return ifatoia(rt->rt_ifa); 1746 } 1747 1748 /* 1749 * Save incoming source route for use in replies, 1750 * to be picked up later by ip_srcroute if the receiver is interested. 1751 */ 1752 void 1753 save_rte(u_char *option, struct in_addr dst) 1754 { 1755 unsigned olen; 1756 1757 olen = option[IPOPT_OLEN]; 1758 #ifdef DIAGNOSTIC 1759 if (ipprintfs) 1760 printf("save_rte: olen %d\n", olen); 1761 #endif /* 0 */ 1762 if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) 1763 return; 1764 memcpy((void *)ip_srcrt.srcopt, (void *)option, olen); 1765 ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); 1766 ip_srcrt.dst = dst; 1767 } 1768 1769 /* 1770 * Retrieve incoming source route for use in replies, 1771 * in the same form used by setsockopt. 1772 * The first hop is placed before the options, will be removed later. 1773 */ 1774 struct mbuf * 1775 ip_srcroute(void) 1776 { 1777 struct in_addr *p, *q; 1778 struct mbuf *m; 1779 1780 if (ip_nhops == 0) 1781 return NULL; 1782 m = m_get(M_DONTWAIT, MT_SOOPTS); 1783 if (m == 0) 1784 return NULL; 1785 1786 MCLAIM(m, &inetdomain.dom_mowner); 1787 #define OPTSIZ (sizeof(ip_srcrt.nop) + sizeof(ip_srcrt.srcopt)) 1788 1789 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */ 1790 m->m_len = ip_nhops * sizeof(struct in_addr) + sizeof(struct in_addr) + 1791 OPTSIZ; 1792 #ifdef DIAGNOSTIC 1793 if (ipprintfs) 1794 printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len); 1795 #endif 1796 1797 /* 1798 * First save first hop for return route 1799 */ 1800 p = &ip_srcrt.route[ip_nhops - 1]; 1801 *(mtod(m, struct in_addr *)) = *p--; 1802 #ifdef DIAGNOSTIC 1803 if (ipprintfs) 1804 printf(" hops %x", ntohl(mtod(m, struct in_addr *)->s_addr)); 1805 #endif 1806 1807 /* 1808 * Copy option fields and padding (nop) to mbuf. 1809 */ 1810 ip_srcrt.nop = IPOPT_NOP; 1811 ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF; 1812 memmove(mtod(m, char *) + sizeof(struct in_addr), &ip_srcrt.nop, 1813 OPTSIZ); 1814 q = (struct in_addr *)(mtod(m, char *) + 1815 sizeof(struct in_addr) + OPTSIZ); 1816 #undef OPTSIZ 1817 /* 1818 * Record return path as an IP source route, 1819 * reversing the path (pointers are now aligned). 1820 */ 1821 while (p >= ip_srcrt.route) { 1822 #ifdef DIAGNOSTIC 1823 if (ipprintfs) 1824 printf(" %x", ntohl(q->s_addr)); 1825 #endif 1826 *q++ = *p--; 1827 } 1828 /* 1829 * Last hop goes to final destination. 
1830 */ 1831 *q = ip_srcrt.dst; 1832 #ifdef DIAGNOSTIC 1833 if (ipprintfs) 1834 printf(" %x\n", ntohl(q->s_addr)); 1835 #endif 1836 return (m); 1837 } 1838 1839 const int inetctlerrmap[PRC_NCMDS] = { 1840 [PRC_MSGSIZE] = EMSGSIZE, 1841 [PRC_HOSTDEAD] = EHOSTDOWN, 1842 [PRC_HOSTUNREACH] = EHOSTUNREACH, 1843 [PRC_UNREACH_NET] = EHOSTUNREACH, 1844 [PRC_UNREACH_HOST] = EHOSTUNREACH, 1845 [PRC_UNREACH_PROTOCOL] = ECONNREFUSED, 1846 [PRC_UNREACH_PORT] = ECONNREFUSED, 1847 [PRC_UNREACH_SRCFAIL] = EHOSTUNREACH, 1848 [PRC_PARAMPROB] = ENOPROTOOPT, 1849 }; 1850 1851 /* 1852 * Forward a packet. If some error occurs return the sender 1853 * an icmp packet. Note we can't always generate a meaningful 1854 * icmp message because icmp doesn't have a large enough repertoire 1855 * of codes and types. 1856 * 1857 * If not forwarding, just drop the packet. This could be confusing 1858 * if ipforwarding was zero but some routing protocol was advancing 1859 * us as a gateway to somewhere. However, we must let the routing 1860 * protocol deal with that. 1861 * 1862 * The srcrt parameter indicates whether the packet is being forwarded 1863 * via a source route. 1864 */ 1865 void 1866 ip_forward(struct mbuf *m, int srcrt) 1867 { 1868 struct ip *ip = mtod(m, struct ip *); 1869 struct rtentry *rt; 1870 int error, type = 0, code = 0, destmtu = 0; 1871 struct mbuf *mcopy; 1872 n_long dest; 1873 union { 1874 struct sockaddr dst; 1875 struct sockaddr_in dst4; 1876 } u; 1877 1878 /* 1879 * We are now in the output path. 1880 */ 1881 MCLAIM(m, &ip_tx_mowner); 1882 1883 /* 1884 * Clear any in-bound checksum flags for this packet. 1885 */ 1886 m->m_pkthdr.csum_flags = 0; 1887 1888 dest = 0; 1889 #ifdef DIAGNOSTIC 1890 if (ipprintfs) { 1891 printf("forward: src %s ", inet_ntoa(ip->ip_src)); 1892 printf("dst %s ttl %x\n", inet_ntoa(ip->ip_dst), ip->ip_ttl); 1893 } 1894 #endif 1895 if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) { 1896 IP_STATINC(IP_STAT_CANTFORWARD); 1897 m_freem(m); 1898 return; 1899 } 1900 if (ip->ip_ttl <= IPTTLDEC) { 1901 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0); 1902 return; 1903 } 1904 1905 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0); 1906 if ((rt = rtcache_lookup(&ipforward_rt, &u.dst)) == NULL) { 1907 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_NET, dest, 0); 1908 return; 1909 } 1910 1911 /* 1912 * Save at most 68 bytes of the packet in case 1913 * we need to generate an ICMP message to the src. 1914 * Pullup to avoid sharing mbuf cluster between m and mcopy. 1915 */ 1916 mcopy = m_copym(m, 0, imin(ntohs(ip->ip_len), 68), M_DONTWAIT); 1917 if (mcopy) 1918 mcopy = m_pullup(mcopy, ip->ip_hl << 2); 1919 1920 ip->ip_ttl -= IPTTLDEC; 1921 1922 /* 1923 * If forwarding packet using same interface that it came in on, 1924 * perhaps should send a redirect to sender to shortcut a hop. 1925 * Only send redirect if source is sending directly to us, 1926 * and if packet was not source routed (or has any options). 1927 * Also, don't send redirect if forwarding using a default route 1928 * or a route modified by a redirect. 
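 *
 * Typical case, with illustrative addresses: a host on 192.0.2.0/24
 * uses us as its next hop for a destination that actually sits behind
 * another router on the same 192.0.2.0/24 segment. The packet goes
 * back out the interface it arrived on, the source lies on the
 * directly attached subnet, and the code below answers with an ICMP
 * host redirect naming that other router.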
1929 */ 1930 if (rt->rt_ifp == m->m_pkthdr.rcvif && 1931 (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 && 1932 !in_nullhost(satocsin(rt_getkey(rt))->sin_addr) && 1933 ipsendredirects && !srcrt) { 1934 if (rt->rt_ifa && 1935 (ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_subnetmask) == 1936 ifatoia(rt->rt_ifa)->ia_subnet) { 1937 if (rt->rt_flags & RTF_GATEWAY) 1938 dest = satosin(rt->rt_gateway)->sin_addr.s_addr; 1939 else 1940 dest = ip->ip_dst.s_addr; 1941 /* 1942 * Router requirements says to only send host 1943 * redirects. 1944 */ 1945 type = ICMP_REDIRECT; 1946 code = ICMP_REDIRECT_HOST; 1947 #ifdef DIAGNOSTIC 1948 if (ipprintfs) 1949 printf("redirect (%d) to %x\n", code, 1950 (u_int32_t)dest); 1951 #endif 1952 } 1953 } 1954 1955 error = ip_output(m, NULL, &ipforward_rt, 1956 (IP_FORWARDING | (ip_directedbcast ? IP_ALLOWBROADCAST : 0)), 1957 (struct ip_moptions *)NULL, (struct socket *)NULL); 1958 1959 if (error) 1960 IP_STATINC(IP_STAT_CANTFORWARD); 1961 else { 1962 uint64_t *ips = IP_STAT_GETREF(); 1963 ips[IP_STAT_FORWARD]++; 1964 if (type) { 1965 ips[IP_STAT_REDIRECTSENT]++; 1966 IP_STAT_PUTREF(); 1967 } else { 1968 IP_STAT_PUTREF(); 1969 if (mcopy) { 1970 #ifdef GATEWAY 1971 if (mcopy->m_flags & M_CANFASTFWD) 1972 ipflow_create(&ipforward_rt, mcopy); 1973 #endif 1974 m_freem(mcopy); 1975 } 1976 return; 1977 } 1978 } 1979 if (mcopy == NULL) 1980 return; 1981 1982 switch (error) { 1983 1984 case 0: /* forwarded, but need redirect */ 1985 /* type, code set above */ 1986 break; 1987 1988 case ENETUNREACH: /* shouldn't happen, checked above */ 1989 case EHOSTUNREACH: 1990 case ENETDOWN: 1991 case EHOSTDOWN: 1992 default: 1993 type = ICMP_UNREACH; 1994 code = ICMP_UNREACH_HOST; 1995 break; 1996 1997 case EMSGSIZE: 1998 type = ICMP_UNREACH; 1999 code = ICMP_UNREACH_NEEDFRAG; 2000 2001 if ((rt = rtcache_validate(&ipforward_rt)) != NULL) 2002 destmtu = rt->rt_ifp->if_mtu; 2003 2004 #if defined(IPSEC) || defined(FAST_IPSEC) 2005 { 2006 /* 2007 * If the packet is routed over IPsec tunnel, tell the 2008 * originator the tunnel MTU. 2009 * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz 2010 * XXX quickhack!!! 2011 */ 2012 2013 struct secpolicy *sp; 2014 int ipsecerror; 2015 size_t ipsechdr; 2016 struct route *ro; 2017 2018 sp = ipsec4_getpolicybyaddr(mcopy, 2019 IPSEC_DIR_OUTBOUND, IP_FORWARDING, 2020 &ipsecerror); 2021 2022 if (sp != NULL) { 2023 /* count IPsec header size */ 2024 ipsechdr = ipsec4_hdrsiz(mcopy, 2025 IPSEC_DIR_OUTBOUND, NULL); 2026 2027 /* 2028 * find the correct route for outer IPv4 2029 * header, compute tunnel MTU. 2030 */ 2031 2032 if (sp->req != NULL 2033 && sp->req->sav != NULL 2034 && sp->req->sav->sah != NULL) { 2035 ro = &sp->req->sav->sah->sa_route; 2036 rt = rtcache_validate(ro); 2037 if (rt && rt->rt_ifp) { 2038 destmtu = 2039 rt->rt_rmx.rmx_mtu ? 2040 rt->rt_rmx.rmx_mtu : 2041 rt->rt_ifp->if_mtu; 2042 destmtu -= ipsechdr; 2043 } 2044 } 2045 2046 #ifdef IPSEC 2047 key_freesp(sp); 2048 #else 2049 KEY_FREESP(&sp); 2050 #endif 2051 } 2052 } 2053 #endif /*defined(IPSEC) || defined(FAST_IPSEC)*/ 2054 IP_STATINC(IP_STAT_CANTFRAG); 2055 break; 2056 2057 case ENOBUFS: 2058 #if 1 2059 /* 2060 * a router should not generate ICMP_SOURCEQUENCH as 2061 * required in RFC1812 Requirements for IP Version 4 Routers. 2062 * source quench could be a big problem under DoS attacks, 2063 * or if the underlying interface is rate-limited. 
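 *
 * (RFC 1812 says a router should not originate Source Quench, which
 * is why the ENOBUFS case below simply frees the saved copy and
 * returns instead of sending ICMP_SOURCEQUENCH.)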
		 */
		if (mcopy)
			m_freem(mcopy);
		return;
#else
		type = ICMP_SOURCEQUENCH;
		code = 0;
		break;
#endif
	}
	icmp_error(mcopy, type, code, dest, destmtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & SO_TIMESTAMP
#ifdef SO_OTIMESTAMP
	    || inp->inp_socket->so_options & SO_OTIMESTAMP
#endif
	    ) {
		struct timeval tv;

		microtime(&tv);
#ifdef SO_OTIMESTAMP
		if (inp->inp_socket->so_options & SO_OTIMESTAMP) {
			struct timeval50 tv50;
			timeval_to_timeval50(&tv, &tv50);
			*mp = sbcreatecontrol((void *) &tv50, sizeof(tv50),
			    SCM_OTIMESTAMP, SOL_SOCKET);
		} else
#endif
		*mp = sbcreatecontrol((void *) &tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((void *) &ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/*
	 * XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 *	- fenner@parc.xerox.com
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((void *) opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((void *) ip_srcroute(),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct sockaddr_dl sdl;

		sockaddr_dl_init(&sdl, sizeof(sdl),
		    (m->m_pkthdr.rcvif != NULL)
		    ?  m->m_pkthdr.rcvif->if_index
		    :  0,
		    0, NULL, 0, NULL, 0);
		*mp = sbcreatecontrol(&sdl, sdl.sdl_len, IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((void *) &ip->ip_ttl,
		    sizeof(uint8_t), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * sysctl helper routine for net.inet.ip.forwsrcrt.
 */
static int
sysctl_net_inet_ip_forwsrcrt(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_forwsrcrt;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FORWSRCRT,
	    0, NULL, NULL, NULL);
	if (error)
		return (error);

	ip_forwsrcrt = tmp;

	return (0);
}
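
#if 0
/*
 * Illustrative userland sketch (an assumption for exposition, not part
 * of this file and not compiled here): reading and toggling
 * net.inet.ip.forwsrcrt with sysctlbyname(3).  Writes are mediated by
 * sysctl_net_inet_ip_forwsrcrt() above and gated by the
 * KAUTH_NETWORK_FORWSRCRT authorization check, so an unprivileged
 * process can read the knob but will normally fail to change it.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int cur, off = 0;
	size_t len = sizeof(cur);

	/* Read the current setting. */
	if (sysctlbyname("net.inet.ip.forwsrcrt", &cur, &len, NULL, 0) == -1) {
		perror("sysctlbyname(read)");
		return 1;
	}
	printf("net.inet.ip.forwsrcrt = %d\n", cur);

	/* Attempt to disable forwarding of source-routed datagrams
	 * (requires sufficient privilege). */
	if (sysctlbyname("net.inet.ip.forwsrcrt", NULL, NULL,
	    &off, sizeof(off)) == -1)
		perror("sysctlbyname(write)");
	return 0;
}
#endif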
2180 */ 2181 static int 2182 sysctl_net_inet_ip_pmtudto(SYSCTLFN_ARGS) 2183 { 2184 int error, tmp; 2185 struct sysctlnode node; 2186 2187 node = *rnode; 2188 tmp = ip_mtudisc_timeout; 2189 node.sysctl_data = &tmp; 2190 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2191 if (error || newp == NULL) 2192 return (error); 2193 if (tmp < 0) 2194 return (EINVAL); 2195 2196 mutex_enter(softnet_lock); 2197 2198 ip_mtudisc_timeout = tmp; 2199 rt_timer_queue_change(ip_mtudisc_timeout_q, ip_mtudisc_timeout); 2200 2201 mutex_exit(softnet_lock); 2202 2203 return (0); 2204 } 2205 2206 #ifdef GATEWAY 2207 /* 2208 * sysctl helper routine for net.inet.ip.maxflows. 2209 */ 2210 static int 2211 sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS) 2212 { 2213 int error; 2214 2215 error = sysctl_lookup(SYSCTLFN_CALL(rnode)); 2216 if (error || newp == NULL) 2217 return (error); 2218 2219 mutex_enter(softnet_lock); 2220 KERNEL_LOCK(1, NULL); 2221 2222 ipflow_prune(); 2223 2224 KERNEL_UNLOCK_ONE(NULL); 2225 mutex_exit(softnet_lock); 2226 2227 return (0); 2228 } 2229 2230 static int 2231 sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS) 2232 { 2233 int error, tmp; 2234 struct sysctlnode node; 2235 2236 node = *rnode; 2237 tmp = ip_hashsize; 2238 node.sysctl_data = &tmp; 2239 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2240 if (error || newp == NULL) 2241 return (error); 2242 2243 if ((tmp & (tmp - 1)) == 0 && tmp != 0) { 2244 /* 2245 * Can only fail due to malloc() 2246 */ 2247 mutex_enter(softnet_lock); 2248 KERNEL_LOCK(1, NULL); 2249 2250 error = ipflow_invalidate_all(tmp); 2251 2252 KERNEL_UNLOCK_ONE(NULL); 2253 mutex_exit(softnet_lock); 2254 2255 } else { 2256 /* 2257 * EINVAL if not a power of 2 2258 */ 2259 error = EINVAL; 2260 } 2261 2262 return error; 2263 } 2264 #endif /* GATEWAY */ 2265 2266 static int 2267 sysctl_net_inet_ip_stats(SYSCTLFN_ARGS) 2268 { 2269 2270 return (NETSTAT_SYSCTL(ipstat_percpu, IP_NSTATS)); 2271 } 2272 2273 static void 2274 sysctl_net_inet_ip_setup(struct sysctllog **clog) 2275 { 2276 extern int subnetsarelocal, hostzeroisbroadcast; 2277 2278 sysctl_createv(clog, 0, NULL, NULL, 2279 CTLFLAG_PERMANENT, 2280 CTLTYPE_NODE, "net", NULL, 2281 NULL, 0, NULL, 0, 2282 CTL_NET, CTL_EOL); 2283 sysctl_createv(clog, 0, NULL, NULL, 2284 CTLFLAG_PERMANENT, 2285 CTLTYPE_NODE, "inet", 2286 SYSCTL_DESCR("PF_INET related settings"), 2287 NULL, 0, NULL, 0, 2288 CTL_NET, PF_INET, CTL_EOL); 2289 sysctl_createv(clog, 0, NULL, NULL, 2290 CTLFLAG_PERMANENT, 2291 CTLTYPE_NODE, "ip", 2292 SYSCTL_DESCR("IPv4 related settings"), 2293 NULL, 0, NULL, 0, 2294 CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL); 2295 2296 sysctl_createv(clog, 0, NULL, NULL, 2297 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2298 CTLTYPE_INT, "forwarding", 2299 SYSCTL_DESCR("Enable forwarding of INET datagrams"), 2300 NULL, 0, &ipforwarding, 0, 2301 CTL_NET, PF_INET, IPPROTO_IP, 2302 IPCTL_FORWARDING, CTL_EOL); 2303 sysctl_createv(clog, 0, NULL, NULL, 2304 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2305 CTLTYPE_INT, "redirect", 2306 SYSCTL_DESCR("Enable sending of ICMP redirect messages"), 2307 NULL, 0, &ipsendredirects, 0, 2308 CTL_NET, PF_INET, IPPROTO_IP, 2309 IPCTL_SENDREDIRECTS, CTL_EOL); 2310 sysctl_createv(clog, 0, NULL, NULL, 2311 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2312 CTLTYPE_INT, "ttl", 2313 SYSCTL_DESCR("Default TTL for an INET datagram"), 2314 NULL, 0, &ip_defttl, 0, 2315 CTL_NET, PF_INET, IPPROTO_IP, 2316 IPCTL_DEFTTL, CTL_EOL); 2317 #ifdef IPCTL_DEFMTU 2318 sysctl_createv(clog, 0, NULL, NULL, 2319 CTLFLAG_PERMANENT /* |CTLFLAG_READWRITE? 
		       CTLTYPE_INT, "mtu",
		       SYSCTL_DESCR("Default MTU for an INET route"),
		       NULL, 0, &ip_mtu, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_DEFMTU, CTL_EOL);
#endif /* IPCTL_DEFMTU */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "forwsrcrt",
		       SYSCTL_DESCR("Enable forwarding of source-routed "
				    "datagrams"),
		       sysctl_net_inet_ip_forwsrcrt, 0, &ip_forwsrcrt, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_FORWSRCRT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "directed-broadcast",
		       SYSCTL_DESCR("Enable forwarding of broadcast datagrams"),
		       NULL, 0, &ip_directedbcast, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_DIRECTEDBCAST, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "allowsrcrt",
		       SYSCTL_DESCR("Accept source-routed datagrams"),
		       NULL, 0, &ip_allowsrcrt, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_ALLOWSRCRT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "subnetsarelocal",
		       SYSCTL_DESCR("Whether logical subnets are considered "
				    "local"),
		       NULL, 0, &subnetsarelocal, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_SUBNETSARELOCAL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "mtudisc",
		       SYSCTL_DESCR("Use RFC1191 Path MTU Discovery"),
		       NULL, 0, &ip_mtudisc, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MTUDISC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "anonportmin",
		       SYSCTL_DESCR("Lowest ephemeral port number to assign"),
		       sysctl_net_inet_ip_ports, 0, &anonportmin, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_ANONPORTMIN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "anonportmax",
		       SYSCTL_DESCR("Highest ephemeral port number to assign"),
		       sysctl_net_inet_ip_ports, 0, &anonportmax, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_ANONPORTMAX, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "mtudisctimeout",
		       SYSCTL_DESCR("Lifetime of a Path MTU Discovered route"),
		       sysctl_net_inet_ip_pmtudto, 0, &ip_mtudisc_timeout, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MTUDISCTIMEOUT, CTL_EOL);
#ifdef GATEWAY
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
#endif /* GATEWAY */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hostzerobroadcast",
		       SYSCTL_DESCR("All zeroes address is broadcast address"),
		       NULL, 0, &hostzeroisbroadcast, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_HOSTZEROBROADCAST, CTL_EOL);
#if NGIF > 0
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "gifttl",
		       SYSCTL_DESCR("Default TTL for a gif tunnel datagram"),
		       NULL, 0, &ip_gif_ttl, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_GIF_TTL, CTL_EOL);
#endif /* NGIF */
#ifndef IPNOPRIVPORTS
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "lowportmin",
		       SYSCTL_DESCR("Lowest privileged ephemeral port number "
				    "to assign"),
		       sysctl_net_inet_ip_ports, 0, &lowportmin, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_LOWPORTMIN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "lowportmax",
		       SYSCTL_DESCR("Highest privileged ephemeral port number "
				    "to assign"),
		       sysctl_net_inet_ip_ports, 0, &lowportmax, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_LOWPORTMAX, CTL_EOL);
#endif /* IPNOPRIVPORTS */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxfragpackets",
		       SYSCTL_DESCR("Maximum number of fragments to retain for "
				    "possible reassembly"),
		       NULL, 0, &ip_maxfragpackets, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFRAGPACKETS, CTL_EOL);
#if NGRE > 0
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "grettl",
		       SYSCTL_DESCR("Default TTL for a gre tunnel datagram"),
		       NULL, 0, &ip_gre_ttl, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_GRE_TTL, CTL_EOL);
#endif /* NGRE */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "checkinterface",
		       SYSCTL_DESCR("Enable receive side of Strong ES model "
				    "from RFC1122"),
		       NULL, 0, &ip_checkinterface, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_CHECKINTERFACE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "random_id",
		       SYSCTL_DESCR("Assign random ip_id values"),
		       NULL, 0, &ip_do_randomid, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_RANDOMID, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "do_loopback_cksum",
		       SYSCTL_DESCR("Perform IP checksum on loopback"),
		       NULL, 0, &ip_do_loopback_cksum, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_LOOPBACKCKSUM, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "stats",
		       SYSCTL_DESCR("IP statistics"),
		       sysctl_net_inet_ip_stats, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, IPCTL_STATS,
		       CTL_EOL);
}

void
ip_statinc(u_int stat)
{

	KASSERT(stat < IP_NSTATS);
	IP_STATINC(stat);
}
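
#if 0
/*
 * Illustrative sketch (not compiled): how a transport protocol's
 * *_ctlinput routine typically consults inetctlerrmap, defined earlier
 * in this file, to translate a PRC_* command delivered via ICMP into
 * the errno reported to the affected sockets.  The helper name
 * prc_to_errno() is hypothetical.
 */
static int
prc_to_errno(int cmd)
{

	if ((unsigned)cmd >= PRC_NCMDS)
		return 0;		/* unknown command, nothing to report */
	return inetctlerrmap[cmd];	/* 0 means "no error to report" */
}
#endif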
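
#if 0
/*
 * Illustrative userland sketch (an assumption for exposition, not part
 * of this file and not compiled here): consuming the control messages
 * that ip_savecontrol() above attaches when IP_RECVDSTADDR and
 * IP_RECVTTL are enabled on a UDP socket.
 */
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
recv_with_cmsgs(int s)
{
	char data[1500], cbuf[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg;
	struct cmsghdr *cm;
	int on = 1;

	/* Ask the kernel to attach the ancillary data built above. */
	setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));
	setsockopt(s, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(s, &msg, 0) == -1)
		return;

	/* Walk the control messages delivered with the datagram. */
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
	     cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level != IPPROTO_IP)
			continue;
		if (cm->cmsg_type == IP_RECVDSTADDR) {
			struct in_addr dst;

			memcpy(&dst, CMSG_DATA(cm), sizeof(dst));
			printf("dst %s\n", inet_ntoa(dst));
		} else if (cm->cmsg_type == IP_RECVTTL) {
			uint8_t ttl;

			memcpy(&ttl, CMSG_DATA(cm), sizeof(ttl));
			printf("ttl %u\n", ttl);
		}
	}
}
#endif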