/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
61 * 62 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 63 * $FreeBSD: src/sys/netinet/ip_input.c,v 1.130.2.52 2003/03/07 07:01:28 silby Exp $ 64 */ 65 66 #define _IP_VHL 67 68 #include "opt_bootp.h" 69 #include "opt_ipdn.h" 70 #include "opt_ipdivert.h" 71 #include "opt_ipstealth.h" 72 #include "opt_ipsec.h" 73 #include "opt_rss.h" 74 75 #include <sys/param.h> 76 #include <sys/systm.h> 77 #include <sys/mbuf.h> 78 #include <sys/malloc.h> 79 #include <sys/mpipe.h> 80 #include <sys/domain.h> 81 #include <sys/protosw.h> 82 #include <sys/socket.h> 83 #include <sys/time.h> 84 #include <sys/globaldata.h> 85 #include <sys/thread.h> 86 #include <sys/kernel.h> 87 #include <sys/syslog.h> 88 #include <sys/sysctl.h> 89 #include <sys/in_cksum.h> 90 #include <sys/lock.h> 91 92 #include <sys/mplock2.h> 93 94 #include <machine/stdarg.h> 95 96 #include <net/if.h> 97 #include <net/if_types.h> 98 #include <net/if_var.h> 99 #include <net/if_dl.h> 100 #include <net/pfil.h> 101 #include <net/route.h> 102 #include <net/netisr2.h> 103 104 #include <netinet/in.h> 105 #include <netinet/in_systm.h> 106 #include <netinet/in_var.h> 107 #include <netinet/ip.h> 108 #include <netinet/in_pcb.h> 109 #include <netinet/ip_var.h> 110 #include <netinet/ip_icmp.h> 111 #include <netinet/ip_divert.h> 112 #include <netinet/ip_flow.h> 113 114 #include <sys/thread2.h> 115 #include <sys/msgport2.h> 116 #include <net/netmsg2.h> 117 118 #include <sys/socketvar.h> 119 120 #include <net/ipfw/ip_fw.h> 121 #include <net/dummynet/ip_dummynet.h> 122 123 #ifdef IPSEC 124 #include <netinet6/ipsec.h> 125 #include <netproto/key/key.h> 126 #endif 127 128 #ifdef FAST_IPSEC 129 #include <netproto/ipsec/ipsec.h> 130 #include <netproto/ipsec/key.h> 131 #endif 132 133 int rsvp_on = 0; 134 static int ip_rsvp_on; 135 struct socket *ip_rsvpd; 136 137 int ipforwarding = 0; 138 SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW, 139 &ipforwarding, 0, "Enable IP forwarding between interfaces"); 140 141 static int ipsendredirects = 1; /* XXX */ 142 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW, 143 &ipsendredirects, 0, "Enable sending IP redirects"); 144 145 int ip_defttl = IPDEFTTL; 146 SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW, 147 &ip_defttl, 0, "Maximum TTL on IP packets"); 148 149 static int ip_dosourceroute = 0; 150 SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, CTLFLAG_RW, 151 &ip_dosourceroute, 0, "Enable forwarding source routed IP packets"); 152 153 static int ip_acceptsourceroute = 0; 154 SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute, 155 CTLFLAG_RW, &ip_acceptsourceroute, 0, 156 "Enable accepting source routed IP packets"); 157 158 static int ip_keepfaith = 0; 159 SYSCTL_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW, 160 &ip_keepfaith, 0, 161 "Enable packet capture for FAITH IPv4->IPv6 translator daemon"); 162 163 static int maxnipq; 164 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_RW, 165 &maxnipq, 0, 166 "Maximum number of IPv4 fragment reassembly queue entries"); 167 168 static int maxfragsperpacket; 169 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW, 170 &maxfragsperpacket, 0, 171 "Maximum number of IPv4 fragments allowed per packet"); 172 173 static int ip_sendsourcequench = 0; 174 SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW, 175 &ip_sendsourcequench, 0, 176 "Enable the transmission of source quench packets"); 177 178 int ip_do_randomid = 1; 179 SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW, 180 
&ip_do_randomid, 0, 181 "Assign random ip_id values"); 182 /* 183 * XXX - Setting ip_checkinterface mostly implements the receive side of 184 * the Strong ES model described in RFC 1122, but since the routing table 185 * and transmit implementation do not implement the Strong ES model, 186 * setting this to 1 results in an odd hybrid. 187 * 188 * XXX - ip_checkinterface currently must be disabled if you use ipnat 189 * to translate the destination address to another local interface. 190 * 191 * XXX - ip_checkinterface must be disabled if you add IP aliases 192 * to the loopback interface instead of the interface where the 193 * packets for those addresses are received. 194 */ 195 static int ip_checkinterface = 0; 196 SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW, 197 &ip_checkinterface, 0, "Verify packet arrives on correct interface"); 198 199 static u_long ip_hash_count = 0; 200 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, hash_count, CTLFLAG_RD, 201 &ip_hash_count, 0, "Number of packets hashed by IP"); 202 203 #ifdef RSS_DEBUG 204 static u_long ip_rehash_count = 0; 205 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, rehash_count, CTLFLAG_RD, 206 &ip_rehash_count, 0, "Number of packets rehashed by IP"); 207 208 static u_long ip_dispatch_fast = 0; 209 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, dispatch_fast_count, CTLFLAG_RD, 210 &ip_dispatch_fast, 0, "Number of packets handled on current CPU"); 211 212 static u_long ip_dispatch_slow = 0; 213 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, dispatch_slow_count, CTLFLAG_RD, 214 &ip_dispatch_slow, 0, "Number of packets messaged to another CPU"); 215 #endif 216 217 #ifdef DIAGNOSTIC 218 static int ipprintfs = 0; 219 #endif 220 221 extern struct domain inetdomain; 222 extern struct protosw inetsw[]; 223 u_char ip_protox[IPPROTO_MAX]; 224 struct in_ifaddrhead in_ifaddrheads[MAXCPU]; /* first inet address */ 225 struct in_ifaddrhashhead *in_ifaddrhashtbls[MAXCPU]; 226 /* inet addr hash table */ 227 u_long in_ifaddrhmask; /* mask for hash table */ 228 229 static struct mbuf *ipforward_mtemp[MAXCPU]; 230 231 struct ip_stats ipstats_percpu[MAXCPU] __cachealign; 232 233 static int 234 sysctl_ipstats(SYSCTL_HANDLER_ARGS) 235 { 236 int cpu, error = 0; 237 238 for (cpu = 0; cpu < netisr_ncpus; ++cpu) { 239 if ((error = SYSCTL_OUT(req, &ipstats_percpu[cpu], 240 sizeof(struct ip_stats)))) 241 break; 242 if ((error = SYSCTL_IN(req, &ipstats_percpu[cpu], 243 sizeof(struct ip_stats)))) 244 break; 245 } 246 247 return (error); 248 } 249 SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW), 250 0, 0, sysctl_ipstats, "S,ip_stats", "IP statistics"); 251 252 /* Packet reassembly stuff */ 253 #define IPREASS_NHASH_LOG2 6 254 #define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2) 255 #define IPREASS_HMASK (IPREASS_NHASH - 1) 256 #define IPREASS_HASH(x,y) \ 257 (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK) 258 259 TAILQ_HEAD(ipqhead, ipq); 260 struct ipfrag_queue { 261 int nipq; 262 volatile int draining; 263 struct netmsg_base timeo_netmsg; 264 struct callout timeo_ch; 265 struct netmsg_base drain_netmsg; 266 struct ipqhead ipq[IPREASS_NHASH]; 267 } __cachealign; 268 269 static struct ipfrag_queue ipfrag_queue_pcpu[MAXCPU]; 270 271 #ifdef IPCTL_DEFMTU 272 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW, 273 &ip_mtu, 0, "Default MTU"); 274 #endif 275 276 #ifdef IPSTEALTH 277 static int ipstealth = 0; 278 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW, &ipstealth, 0, ""); 279 #else 280 static const int ipstealth = 0; 281 #endif 282 
283 struct mbuf *(*ip_divert_p)(struct mbuf *, int, int); 284 285 struct pfil_head inet_pfil_hook; 286 287 /* 288 * struct ip_srcrt_opt is used to store packet state while it travels 289 * through the stack. 290 * 291 * XXX Note that the code even makes assumptions on the size and 292 * alignment of fields inside struct ip_srcrt so e.g. adding some 293 * fields will break the code. This needs to be fixed. 294 * 295 * We need to save the IP options in case a protocol wants to respond 296 * to an incoming packet over the same route if the packet got here 297 * using IP source routing. This allows connection establishment and 298 * maintenance when the remote end is on a network that is not known 299 * to us. 300 */ 301 struct ip_srcrt { 302 struct in_addr dst; /* final destination */ 303 char nop; /* one NOP to align */ 304 char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */ 305 struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)]; 306 }; 307 308 struct ip_srcrt_opt { 309 int ip_nhops; 310 struct ip_srcrt ip_srcrt; 311 }; 312 313 #define IPFRAG_MPIPE_MAX 4096 314 #define MAXIPFRAG_MIN ((IPFRAG_MPIPE_MAX * 2) / 256) 315 316 #define IPFRAG_TIMEO (hz / PR_SLOWHZ) 317 318 static MALLOC_DEFINE(M_IPQ, "ipq", "IP Fragment Management"); 319 static struct malloc_pipe ipq_mpipe; 320 321 static void save_rte(struct mbuf *, u_char *, struct in_addr); 322 static int ip_dooptions(struct mbuf *m, int, struct sockaddr_in *); 323 static void ip_freef(struct ipfrag_queue *, struct ipqhead *, 324 struct ipq *); 325 static void ip_input_handler(netmsg_t); 326 327 static void ipfrag_timeo_dispatch(netmsg_t); 328 static void ipfrag_timeo(void *); 329 static void ipfrag_drain_dispatch(netmsg_t); 330 331 /* 332 * IP initialization: fill in IP protocol switch table. 333 * All protocols not implemented in kernel go to raw IP protocol handler. 334 */ 335 void 336 ip_init(void) 337 { 338 struct ipfrag_queue *fragq; 339 struct protosw *pr; 340 int cpu, i; 341 342 /* 343 * Make sure we can handle a reasonable number of fragments but 344 * cap it at IPFRAG_MPIPE_MAX. 345 */ 346 mpipe_init(&ipq_mpipe, M_IPQ, sizeof(struct ipq), 347 IFQ_MAXLEN, IPFRAG_MPIPE_MAX, 0, NULL, NULL, NULL); 348 349 /* 350 * Make in_ifaddrhead and in_ifaddrhashtbl available on all CPUs, 351 * since they could be accessed by any threads. 352 */ 353 for (cpu = 0; cpu < ncpus; ++cpu) { 354 TAILQ_INIT(&in_ifaddrheads[cpu]); 355 in_ifaddrhashtbls[cpu] = 356 hashinit(INADDR_NHASH, M_IFADDR, &in_ifaddrhmask); 357 } 358 359 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 360 if (pr == NULL) 361 panic("ip_init"); 362 for (i = 0; i < IPPROTO_MAX; i++) 363 ip_protox[i] = pr - inetsw; 364 for (pr = inetdomain.dom_protosw; 365 pr < inetdomain.dom_protoswNPROTOSW; pr++) { 366 if (pr->pr_domain->dom_family == PF_INET && pr->pr_protocol) { 367 if (pr->pr_protocol != IPPROTO_RAW) 368 ip_protox[pr->pr_protocol] = pr - inetsw; 369 } 370 } 371 372 inet_pfil_hook.ph_type = PFIL_TYPE_AF; 373 inet_pfil_hook.ph_af = AF_INET; 374 if ((i = pfil_head_register(&inet_pfil_hook)) != 0) { 375 kprintf("%s: WARNING: unable to register pfil hook, " 376 "error %d\n", __func__, i); 377 } 378 379 maxnipq = (nmbclusters / 32) / netisr_ncpus; 380 if (maxnipq < MAXIPFRAG_MIN) 381 maxnipq = MAXIPFRAG_MIN; 382 maxfragsperpacket = 16; 383 384 ip_id = time_second & 0xffff; /* time_second survives reboots */ 385 386 for (cpu = 0; cpu < netisr_ncpus; ++cpu) { 387 /* 388 * Initialize IP statistics counters for each CPU. 
389 */ 390 bzero(&ipstats_percpu[cpu], sizeof(struct ip_stats)); 391 392 /* 393 * Preallocate mbuf template for forwarding 394 */ 395 MGETHDR(ipforward_mtemp[cpu], M_WAITOK, MT_DATA); 396 397 /* 398 * Initialize per-cpu ip fragments queues 399 */ 400 fragq = &ipfrag_queue_pcpu[cpu]; 401 for (i = 0; i < IPREASS_NHASH; i++) 402 TAILQ_INIT(&fragq->ipq[i]); 403 404 callout_init_mp(&fragq->timeo_ch); 405 netmsg_init(&fragq->timeo_netmsg, NULL, &netisr_adone_rport, 406 MSGF_PRIORITY, ipfrag_timeo_dispatch); 407 netmsg_init(&fragq->drain_netmsg, NULL, &netisr_adone_rport, 408 MSGF_PRIORITY, ipfrag_drain_dispatch); 409 } 410 411 netisr_register(NETISR_IP, ip_input_handler, ip_hashfn); 412 netisr_register_hashcheck(NETISR_IP, ip_hashcheck); 413 414 for (cpu = 0; cpu < netisr_ncpus; ++cpu) { 415 fragq = &ipfrag_queue_pcpu[cpu]; 416 callout_reset_bycpu(&fragq->timeo_ch, IPFRAG_TIMEO, 417 ipfrag_timeo, NULL, cpu); 418 } 419 } 420 421 /* Do transport protocol processing. */ 422 static void 423 transport_processing_oncpu(struct mbuf *m, int hlen, struct ip *ip) 424 { 425 const struct protosw *pr = &inetsw[ip_protox[ip->ip_p]]; 426 427 /* 428 * Switch out to protocol's input routine. 429 */ 430 PR_GET_MPLOCK(pr); 431 pr->pr_input(&m, &hlen, ip->ip_p); 432 PR_REL_MPLOCK(pr); 433 } 434 435 static void 436 transport_processing_handler(netmsg_t msg) 437 { 438 struct netmsg_packet *pmsg = &msg->packet; 439 struct ip *ip; 440 int hlen; 441 442 ip = mtod(pmsg->nm_packet, struct ip *); 443 hlen = pmsg->base.lmsg.u.ms_result; 444 445 transport_processing_oncpu(pmsg->nm_packet, hlen, ip); 446 /* msg was embedded in the mbuf, do not reply! */ 447 } 448 449 static void 450 ip_input_handler(netmsg_t msg) 451 { 452 ip_input(msg->packet.nm_packet); 453 /* msg was embedded in the mbuf, do not reply! */ 454 } 455 456 /* 457 * IP input routine. Checksum and byte swap header. If fragmented 458 * try to reassemble. Process options. Pass to next level. 459 */ 460 void 461 ip_input(struct mbuf *m) 462 { 463 struct ip *ip; 464 struct in_ifaddr *ia = NULL; 465 struct in_ifaddr_container *iac; 466 int hlen, checkif; 467 u_short sum; 468 struct in_addr pkt_dst; 469 boolean_t using_srcrt = FALSE; /* forward (by PFIL_HOOKS) */ 470 struct in_addr odst; /* original dst address(NAT) */ 471 struct m_tag *mtag; 472 struct sockaddr_in *next_hop = NULL; 473 lwkt_port_t port; 474 #ifdef FAST_IPSEC 475 struct tdb_ident *tdbi; 476 struct secpolicy *sp; 477 int error; 478 #endif 479 480 ASSERT_NETISR_NCPUS(mycpuid); 481 M_ASSERTPKTHDR(m); 482 483 /* length checks already done in ip_hashfn() */ 484 KASSERT(m->m_len >= sizeof(struct ip), ("IP header not in one mbuf")); 485 486 /* 487 * This routine is called from numerous places which may not have 488 * characterized the packet. 489 */ 490 ip = mtod(m, struct ip *); 491 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 492 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK))) { 493 /* 494 * Force hash recalculation for fragments and multicast 495 * packets; hardware may not do it correctly. 
		 * XXX add flag to indicate the hash is from hardware
		 */
		m->m_flags &= ~M_HASH;
	}
	if ((m->m_flags & M_HASH) == 0) {
		ip_hashfn(&m, 0);
		if (m == NULL)
			return;
		KKASSERT(m->m_flags & M_HASH);

		if (&curthread->td_msgport !=
		    netisr_hashport(m->m_pkthdr.hash)) {
			netisr_queue(NETISR_IP, m);
			/* Requeued to other netisr msgport; done */
			return;
		}

		/* mbuf could have been changed */
		ip = mtod(m, struct ip *);
	}

	/*
	 * Pull out certain tags
	 */
	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
		/* Next hop */
		mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
		KKASSERT(mtag != NULL);
		next_hop = m_tag_data(mtag);
	}

	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
		/* dummynet already filtered us */
		ip = mtod(m, struct ip *);
		ip->ip_len = ntohs(ip->ip_len);
		ip->ip_off = ntohs(ip->ip_off);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
		goto iphack;
	}

	ipstat.ips_total++;

	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}

	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	/* length checks already done in ip_hashfn() */
	KASSERT(hlen >= sizeof(struct ip), ("IP header len too small"));
	KASSERT(m->m_len >= hlen, ("complete IP header not in one mbuf"));

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK)) {
			ipstat.ips_badaddr++;
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip))
			sum = in_cksum_hdr(ip);
		else
			sum = in_cksum(m, hlen);
	}
	if (sum != 0) {
		ipstat.ips_badsum++;
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) {
		/* packet is dropped by traffic conditioner */
		return;
	}
#endif
	/*
	 * Convert fields to host representation.
	 */
	ip->ip_len = ntohs(ip->ip_len);
	ip->ip_off = ntohs(ip->ip_off);

	/* length checks already done in ip_hashfn() */
	KASSERT(ip->ip_len >= hlen, ("total length less than header length"));
	KASSERT(m->m_pkthdr.len >= ip->ip_len, ("mbuf too short"));

	/*
	 * Trim mbufs if longer than the IP header would have us expect.
	 */
	if (m->m_pkthdr.len > ip->ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else {
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
		}
	}
#if defined(IPSEC) && !defined(IPSEC_FILTERGIF)
	/*
	 * Bypass packet filtering for packets from a tunnel (gif).
	 */
	if (ipsec_gethist(m, NULL))
		goto pass;
#endif

	/*
	 * IpHack's section.
	 * Right now, when no processing has been done on the packet
	 * and it is still fresh off the network, we do our black
	 * deals with it.
	 * - Firewall: deny/allow/divert
	 * - Xlate: translate packet's addr/port (NAT).
	 * - Pipe: pass pkt through dummynet.
	 * - Wrap: fake packet's addr/port <unimpl.>
	 * - Encapsulate: put it in another IP and send out.
<unimp.> 615 */ 616 617 iphack: 618 /* 619 * If we've been forwarded from the output side, then 620 * skip the firewall a second time 621 */ 622 if (next_hop != NULL) 623 goto ours; 624 625 /* No pfil hooks */ 626 if (!pfil_has_hooks(&inet_pfil_hook)) { 627 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) { 628 /* 629 * Strip dummynet tags from stranded packets 630 */ 631 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL); 632 KKASSERT(mtag != NULL); 633 m_tag_delete(m, mtag); 634 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED; 635 } 636 goto pass; 637 } 638 639 /* 640 * Run through list of hooks for input packets. 641 * 642 * NOTE! If the packet is rewritten pf/ipfw/whoever must 643 * clear M_HASH. 644 */ 645 odst = ip->ip_dst; 646 if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif, PFIL_IN)) 647 return; 648 if (m == NULL) /* consumed by filter */ 649 return; 650 ip = mtod(m, struct ip *); 651 hlen = IP_VHL_HL(ip->ip_vhl) << 2; 652 using_srcrt = (odst.s_addr != ip->ip_dst.s_addr); 653 654 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) { 655 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 656 KKASSERT(mtag != NULL); 657 next_hop = m_tag_data(mtag); 658 } 659 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) { 660 ip_dn_queue(m); 661 return; 662 } 663 if (m->m_pkthdr.fw_flags & FW_MBUF_REDISPATCH) { 664 m->m_pkthdr.fw_flags &= ~FW_MBUF_REDISPATCH; 665 } 666 pass: 667 /* 668 * Process options and, if not destined for us, 669 * ship it on. ip_dooptions returns 1 when an 670 * error was detected (causing an icmp message 671 * to be sent and the original packet to be freed). 672 */ 673 if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, next_hop)) 674 return; 675 676 /* greedy RSVP, snatches any PATH packet of the RSVP protocol and no 677 * matter if it is destined to another node, or whether it is 678 * a multicast one, RSVP wants it! and prevents it from being forwarded 679 * anywhere else. Also checks if the rsvp daemon is running before 680 * grabbing the packet. 681 */ 682 if (rsvp_on && ip->ip_p == IPPROTO_RSVP) 683 goto ours; 684 685 /* 686 * Check our list of addresses, to see if the packet is for us. 687 * If we don't have any addresses, assume any unicast packet 688 * we receive might be for us (and let the upper layers deal 689 * with it). 690 */ 691 if (TAILQ_EMPTY(&in_ifaddrheads[mycpuid]) && 692 !(m->m_flags & (M_MCAST | M_BCAST))) 693 goto ours; 694 695 /* 696 * Cache the destination address of the packet; this may be 697 * changed by use of 'ipfw fwd'. 698 */ 699 pkt_dst = next_hop ? next_hop->sin_addr : ip->ip_dst; 700 701 /* 702 * Enable a consistency check between the destination address 703 * and the arrival interface for a unicast packet (the RFC 1122 704 * strong ES model) if IP forwarding is disabled and the packet 705 * is not locally generated and the packet is not subject to 706 * 'ipfw fwd'. 707 * 708 * XXX - Checking also should be disabled if the destination 709 * address is ipnat'ed to a different interface. 710 * 711 * XXX - Checking is incompatible with IP aliases added 712 * to the loopback interface instead of the interface where 713 * the packets are received. 714 */ 715 checkif = ip_checkinterface && 716 !ipforwarding && 717 m->m_pkthdr.rcvif != NULL && 718 !(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) && 719 next_hop == NULL; 720 721 /* 722 * Check for exact addresses in the hash bucket. 
723 */ 724 LIST_FOREACH(iac, INADDR_HASH(pkt_dst.s_addr), ia_hash) { 725 ia = iac->ia; 726 727 /* 728 * If the address matches, verify that the packet 729 * arrived via the correct interface if checking is 730 * enabled. 731 */ 732 if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst.s_addr && 733 (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif)) 734 goto ours; 735 } 736 ia = NULL; 737 738 /* 739 * Check for broadcast addresses. 740 * 741 * Only accept broadcast packets that arrive via the matching 742 * interface. Reception of forwarded directed broadcasts would 743 * be handled via ip_forward() and ether_output() with the loopback 744 * into the stack for SIMPLEX interfaces handled by ether_output(). 745 */ 746 if (m->m_pkthdr.rcvif != NULL && 747 m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) { 748 struct ifaddr_container *ifac; 749 750 TAILQ_FOREACH(ifac, &m->m_pkthdr.rcvif->if_addrheads[mycpuid], 751 ifa_link) { 752 struct ifaddr *ifa = ifac->ifa; 753 754 if (ifa->ifa_addr == NULL) /* shutdown/startup race */ 755 continue; 756 if (ifa->ifa_addr->sa_family != AF_INET) 757 continue; 758 ia = ifatoia(ifa); 759 if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == 760 pkt_dst.s_addr) 761 goto ours; 762 if (ia->ia_netbroadcast.s_addr == pkt_dst.s_addr) 763 goto ours; 764 #ifdef BOOTP_COMPAT 765 if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) 766 goto ours; 767 #endif 768 } 769 } 770 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 771 struct in_multi *inm; 772 773 if (ip_mrouter != NULL) { 774 /* XXX Multicast routing is not MPSAFE yet */ 775 get_mplock(); 776 777 /* 778 * If we are acting as a multicast router, all 779 * incoming multicast packets are passed to the 780 * kernel-level multicast forwarding function. 781 * The packet is returned (relatively) intact; if 782 * ip_mforward() returns a non-zero value, the packet 783 * must be discarded, else it may be accepted below. 784 */ 785 if (ip_mforward != NULL && 786 ip_mforward(ip, m->m_pkthdr.rcvif, m, NULL) != 0) { 787 rel_mplock(); 788 ipstat.ips_cantforward++; 789 m_freem(m); 790 return; 791 } 792 793 rel_mplock(); 794 795 /* 796 * The process-level routing daemon needs to receive 797 * all multicast IGMP packets, whether or not this 798 * host belongs to their destination groups. 799 */ 800 if (ip->ip_p == IPPROTO_IGMP) 801 goto ours; 802 ipstat.ips_forward++; 803 } 804 /* 805 * See if we belong to the destination multicast group on the 806 * arrival interface. 807 */ 808 inm = IN_LOOKUP_MULTI(&ip->ip_dst, m->m_pkthdr.rcvif); 809 if (inm == NULL) { 810 ipstat.ips_notmember++; 811 m_freem(m); 812 return; 813 } 814 goto ours; 815 } 816 if (ip->ip_dst.s_addr == INADDR_BROADCAST) 817 goto ours; 818 if (ip->ip_dst.s_addr == INADDR_ANY) 819 goto ours; 820 821 /* 822 * FAITH(Firewall Aided Internet Translator) 823 */ 824 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) { 825 if (ip_keepfaith) { 826 if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP) 827 goto ours; 828 } 829 m_freem(m); 830 return; 831 } 832 833 /* 834 * Not for us; forward if possible and desirable. 835 */ 836 if (!ipforwarding) { 837 ipstat.ips_cantforward++; 838 m_freem(m); 839 } else { 840 #ifdef IPSEC 841 /* 842 * Enforce inbound IPsec SPD. 
843 */ 844 if (ipsec4_in_reject(m, NULL)) { 845 ipsecstat.in_polvio++; 846 goto bad; 847 } 848 #endif 849 #ifdef FAST_IPSEC 850 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); 851 crit_enter(); 852 if (mtag != NULL) { 853 tdbi = (struct tdb_ident *)m_tag_data(mtag); 854 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND); 855 } else { 856 sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 857 IP_FORWARDING, &error); 858 } 859 if (sp == NULL) { /* NB: can happen if error */ 860 crit_exit(); 861 /*XXX error stat???*/ 862 DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/ 863 goto bad; 864 } 865 866 /* 867 * Check security policy against packet attributes. 868 */ 869 error = ipsec_in_reject(sp, m); 870 KEY_FREESP(&sp); 871 crit_exit(); 872 if (error) { 873 ipstat.ips_cantforward++; 874 goto bad; 875 } 876 #endif 877 ip_forward(m, using_srcrt, next_hop); 878 } 879 return; 880 881 ours: 882 883 /* 884 * IPSTEALTH: Process non-routing options only 885 * if the packet is destined for us. 886 */ 887 if (ipstealth && 888 hlen > sizeof(struct ip) && 889 ip_dooptions(m, 1, next_hop)) 890 return; 891 892 /* Count the packet in the ip address stats */ 893 if (ia != NULL) { 894 IFA_STAT_INC(&ia->ia_ifa, ipackets, 1); 895 IFA_STAT_INC(&ia->ia_ifa, ibytes, m->m_pkthdr.len); 896 } 897 898 /* 899 * If offset or IP_MF are set, must reassemble. 900 * Otherwise, nothing need be done. 901 * (We could look in the reassembly queue to see 902 * if the packet was previously fragmented, 903 * but it's not worth the time; just let them time out.) 904 */ 905 if (ip->ip_off & (IP_MF | IP_OFFMASK)) { 906 /* 907 * Attempt reassembly; if it succeeds, proceed. ip_reass() 908 * will return a different mbuf. 909 * 910 * NOTE: ip_reass() returns m with M_HASH cleared to force 911 * us to recharacterize the packet. 912 */ 913 m = ip_reass(m); 914 if (m == NULL) 915 return; 916 ip = mtod(m, struct ip *); 917 918 /* Get the header length of the reassembled packet */ 919 hlen = IP_VHL_HL(ip->ip_vhl) << 2; 920 } else { 921 ip->ip_len -= hlen; 922 } 923 924 #ifdef IPSEC 925 /* 926 * enforce IPsec policy checking if we are seeing last header. 927 * note that we do not visit this with protocols with pcb layer 928 * code - like udp/tcp/raw ip. 929 */ 930 if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) && 931 ipsec4_in_reject(m, NULL)) { 932 ipsecstat.in_polvio++; 933 goto bad; 934 } 935 #endif 936 #ifdef FAST_IPSEC 937 /* 938 * enforce IPsec policy checking if we are seeing last header. 939 * note that we do not visit this with protocols with pcb layer 940 * code - like udp/tcp/raw ip. 941 */ 942 if (inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) { 943 /* 944 * Check if the packet has already had IPsec processing 945 * done. If so, then just pass it along. This tag gets 946 * set during AH, ESP, etc. input handling, before the 947 * packet is returned to the ip input queue for delivery. 948 */ 949 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); 950 crit_enter(); 951 if (mtag != NULL) { 952 tdbi = (struct tdb_ident *)m_tag_data(mtag); 953 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND); 954 } else { 955 sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 956 IP_FORWARDING, &error); 957 } 958 if (sp != NULL) { 959 /* 960 * Check security policy against packet attributes. 961 */ 962 error = ipsec_in_reject(sp, m); 963 KEY_FREESP(&sp); 964 } else { 965 /* XXX error stat??? 
*/ 966 error = EINVAL; 967 DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/ 968 crit_exit(); 969 goto bad; 970 } 971 crit_exit(); 972 if (error) 973 goto bad; 974 } 975 #endif /* FAST_IPSEC */ 976 977 /* 978 * We must forward the packet to the correct protocol thread if 979 * we are not already in it. 980 * 981 * NOTE: ip_len is now in host form. ip_len is not adjusted 982 * further for protocol processing, instead we pass hlen 983 * to the protosw and let it deal with it. 984 */ 985 ipstat.ips_delivered++; 986 987 if ((m->m_flags & M_HASH) == 0) { 988 #ifdef RSS_DEBUG 989 atomic_add_long(&ip_rehash_count, 1); 990 #endif 991 ip->ip_len = htons(ip->ip_len + hlen); 992 ip->ip_off = htons(ip->ip_off); 993 994 ip_hashfn(&m, 0); 995 if (m == NULL) 996 return; 997 998 ip = mtod(m, struct ip *); 999 ip->ip_len = ntohs(ip->ip_len) - hlen; 1000 ip->ip_off = ntohs(ip->ip_off); 1001 KKASSERT(m->m_flags & M_HASH); 1002 } 1003 port = netisr_hashport(m->m_pkthdr.hash); 1004 1005 if (port != &curthread->td_msgport) { 1006 struct netmsg_packet *pmsg; 1007 1008 #ifdef RSS_DEBUG 1009 atomic_add_long(&ip_dispatch_slow, 1); 1010 #endif 1011 1012 pmsg = &m->m_hdr.mh_netmsg; 1013 netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport, 1014 0, transport_processing_handler); 1015 pmsg->nm_packet = m; 1016 pmsg->base.lmsg.u.ms_result = hlen; 1017 lwkt_sendmsg(port, &pmsg->base.lmsg); 1018 } else { 1019 #ifdef RSS_DEBUG 1020 atomic_add_long(&ip_dispatch_fast, 1); 1021 #endif 1022 transport_processing_oncpu(m, hlen, ip); 1023 } 1024 return; 1025 1026 bad: 1027 m_freem(m); 1028 } 1029 1030 /* 1031 * Take incoming datagram fragment and try to reassemble it into 1032 * whole datagram. If a chain for reassembly of this datagram already 1033 * exists, then it is given as fp; otherwise have to make a chain. 1034 */ 1035 struct mbuf * 1036 ip_reass(struct mbuf *m) 1037 { 1038 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid]; 1039 struct ip *ip = mtod(m, struct ip *); 1040 struct mbuf *p = NULL, *q, *nq; 1041 struct mbuf *n; 1042 struct ipq *fp = NULL; 1043 struct ipqhead *head; 1044 int hlen = IP_VHL_HL(ip->ip_vhl) << 2; 1045 int i, next; 1046 u_short sum; 1047 1048 /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */ 1049 if (maxnipq == 0 || maxfragsperpacket == 0) { 1050 ipstat.ips_fragments++; 1051 ipstat.ips_fragdropped++; 1052 m_freem(m); 1053 return NULL; 1054 } 1055 1056 sum = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id); 1057 /* 1058 * Look for queue of fragments of this datagram. 1059 */ 1060 head = &fragq->ipq[sum]; 1061 TAILQ_FOREACH(fp, head, ipq_list) { 1062 if (ip->ip_id == fp->ipq_id && 1063 ip->ip_src.s_addr == fp->ipq_src.s_addr && 1064 ip->ip_dst.s_addr == fp->ipq_dst.s_addr && 1065 ip->ip_p == fp->ipq_p) 1066 goto found; 1067 } 1068 1069 fp = NULL; 1070 1071 /* 1072 * Enforce upper bound on number of fragmented packets 1073 * for which we attempt reassembly; 1074 * If maxnipq is -1, accept all fragments without limitation. 1075 */ 1076 if (fragq->nipq > maxnipq && maxnipq > 0) { 1077 /* 1078 * drop something from the tail of the current queue 1079 * before proceeding further 1080 */ 1081 struct ipq *q = TAILQ_LAST(head, ipqhead); 1082 if (q == NULL) { 1083 /* 1084 * The current queue is empty, 1085 * so drop from one of the others. 
			 */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&fragq->ipq[i],
				    ipqhead);
				if (r) {
					ipstat.ips_fragtimeout += r->ipq_nfrags;
					ip_freef(fragq, &fragq->ipq[i], r);
					break;
				}
			}
		} else {
			ipstat.ips_fragtimeout += q->ipq_nfrags;
			ip_freef(fragq, head, q);
		}
	}
found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len -= hlen;
	if (ip->ip_off & IP_MF) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
			ipstat.ips_toosmall++; /* XXX */
			m_freem(m);
			goto done;
		}
		m->m_flags |= M_FRAG;
	} else {
		m->m_flags &= ~M_FRAG;
	}
	ip->ip_off <<= 3;

	ipstat.ips_fragments++;
	m->m_pkthdr.header = ip;

	/*
	 * If the hardware has not done csum over this fragment
	 * then csum_data is not valid at all.
	 */
	if ((m->m_pkthdr.csum_flags & (CSUM_FRAG_NOT_CHECKED | CSUM_DATA_VALID))
	    == (CSUM_FRAG_NOT_CHECKED | CSUM_DATA_VALID)) {
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
	}

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if ((fp = mpipe_alloc_nowait(&ipq_mpipe)) == NULL)
			goto dropfrag;
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		fragq->nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto inserted;
	}
	fp->ipq_nfrags++;

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (GETIP(q)->ip_off > ip->ip_off)
			break;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
		if (i > 0) {
			if (i >= ip->ip_len)
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off += i;
			ip->ip_len -= i;
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
	    q = nq) {
		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
		if (i < GETIP(q)->ip_len) {
			GETIP(q)->ip_len -= i;
			GETIP(q)->ip_off += i;
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		ipstat.ips_fragdropped++;
		fp->ipq_nfrags--;
		q->m_nextpkt = NULL;
		m_freem(q);
	}

inserted:
	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 *
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (GETIP(q)->ip_off != next) {
			if (fp->ipq_nfrags > maxfragsperpacket) {
				ipstat.ips_fragdropped += fp->ipq_nfrags;
				ip_freef(fragq, head, fp);
			}
			goto done;
		}
		next += GETIP(q)->ip_len;
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > maxfragsperpacket) {
			ipstat.ips_fragdropped += fp->ipq_nfrags;
			ip_freef(fragq, head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
		ipstat.ips_toolong++;
		ipstat.ips_fragdropped += fp->ipq_nfrags;
		ip_freef(fragq, head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	n = m->m_next;
	m->m_next = NULL;
	m_cat(m, n);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}

	/*
	 * Clean up the 1's complement checksum.  Carry over 16 bits must
	 * be added back.  This assumes no more than 65535 packet fragments
	 * were reassembled.  A second carry can also occur (but not a third).
	 */
	m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
	    (m->m_pkthdr.csum_data >> 16);
	if (m->m_pkthdr.csum_data > 0xFFFF)
		m->m_pkthdr.csum_data -= 0xFFFF;

	/*
	 * Create header for new ip packet by
	 * modifying header of first packet;
	 * dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = next;
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	fragq->nipq--;
	mpipe_free(&ipq_mpipe, fp);
	m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
	m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
		int plen = 0;

		for (n = m; n; n = n->m_next)
			plen += n->m_len;
		m->m_pkthdr.len = plen;
	}

	/*
	 * Reassembly complete, return the next protocol.
	 *
	 * Be sure to clear M_HASH to force the packet
	 * to be re-characterized.
	 *
	 * Clear M_FRAG, we are no longer a fragment.
1321 */ 1322 m->m_flags &= ~(M_HASH | M_FRAG); 1323 1324 ipstat.ips_reassembled++; 1325 return (m); 1326 1327 dropfrag: 1328 ipstat.ips_fragdropped++; 1329 if (fp != NULL) 1330 fp->ipq_nfrags--; 1331 m_freem(m); 1332 done: 1333 return (NULL); 1334 1335 #undef GETIP 1336 } 1337 1338 /* 1339 * Free a fragment reassembly header and all 1340 * associated datagrams. 1341 */ 1342 static void 1343 ip_freef(struct ipfrag_queue *fragq, struct ipqhead *fhp, struct ipq *fp) 1344 { 1345 struct mbuf *q; 1346 1347 /* 1348 * Remove first to protect against blocking 1349 */ 1350 TAILQ_REMOVE(fhp, fp, ipq_list); 1351 1352 /* 1353 * Clean out at our leisure 1354 */ 1355 while (fp->ipq_frags) { 1356 q = fp->ipq_frags; 1357 fp->ipq_frags = q->m_nextpkt; 1358 q->m_nextpkt = NULL; 1359 m_freem(q); 1360 } 1361 mpipe_free(&ipq_mpipe, fp); 1362 fragq->nipq--; 1363 } 1364 1365 /* 1366 * If a timer expires on a reassembly queue, discard it. 1367 */ 1368 static void 1369 ipfrag_timeo_dispatch(netmsg_t nmsg) 1370 { 1371 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid]; 1372 struct ipq *fp, *fp_temp; 1373 struct ipqhead *head; 1374 int i; 1375 1376 crit_enter(); 1377 netisr_replymsg(&nmsg->base, 0); /* reply ASAP */ 1378 crit_exit(); 1379 1380 if (fragq->nipq == 0) 1381 goto done; 1382 1383 for (i = 0; i < IPREASS_NHASH; i++) { 1384 head = &fragq->ipq[i]; 1385 TAILQ_FOREACH_MUTABLE(fp, head, ipq_list, fp_temp) { 1386 if (--fp->ipq_ttl == 0) { 1387 ipstat.ips_fragtimeout += fp->ipq_nfrags; 1388 ip_freef(fragq, head, fp); 1389 } 1390 } 1391 } 1392 /* 1393 * If we are over the maximum number of fragments 1394 * (due to the limit being lowered), drain off 1395 * enough to get down to the new limit. 1396 */ 1397 if (maxnipq >= 0 && fragq->nipq > maxnipq) { 1398 for (i = 0; i < IPREASS_NHASH; i++) { 1399 head = &fragq->ipq[i]; 1400 while (fragq->nipq > maxnipq && !TAILQ_EMPTY(head)) { 1401 ipstat.ips_fragdropped += 1402 TAILQ_FIRST(head)->ipq_nfrags; 1403 ip_freef(fragq, head, TAILQ_FIRST(head)); 1404 } 1405 } 1406 } 1407 done: 1408 callout_reset(&fragq->timeo_ch, IPFRAG_TIMEO, ipfrag_timeo, NULL); 1409 } 1410 1411 static void 1412 ipfrag_timeo(void *dummy __unused) 1413 { 1414 struct netmsg_base *msg = &ipfrag_queue_pcpu[mycpuid].timeo_netmsg; 1415 1416 crit_enter(); 1417 if (msg->lmsg.ms_flags & MSGF_DONE) 1418 netisr_sendmsg_oncpu(msg); 1419 crit_exit(); 1420 } 1421 1422 /* 1423 * Drain off all datagram fragments. 
1424 */ 1425 static void 1426 ipfrag_drain_oncpu(struct ipfrag_queue *fragq) 1427 { 1428 struct ipqhead *head; 1429 int i; 1430 1431 for (i = 0; i < IPREASS_NHASH; i++) { 1432 head = &fragq->ipq[i]; 1433 while (!TAILQ_EMPTY(head)) { 1434 ipstat.ips_fragdropped += TAILQ_FIRST(head)->ipq_nfrags; 1435 ip_freef(fragq, head, TAILQ_FIRST(head)); 1436 } 1437 } 1438 } 1439 1440 static void 1441 ipfrag_drain_dispatch(netmsg_t nmsg) 1442 { 1443 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid]; 1444 1445 crit_enter(); 1446 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */ 1447 crit_exit(); 1448 1449 ipfrag_drain_oncpu(fragq); 1450 fragq->draining = 0; 1451 } 1452 1453 static void 1454 ipfrag_drain_ipi(void *arg __unused) 1455 { 1456 int cpu = mycpuid; 1457 struct lwkt_msg *msg = &ipfrag_queue_pcpu[cpu].drain_netmsg.lmsg; 1458 1459 crit_enter(); 1460 if (msg->ms_flags & MSGF_DONE) 1461 lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg); 1462 crit_exit(); 1463 } 1464 1465 static void 1466 ipfrag_drain(void) 1467 { 1468 cpumask_t mask; 1469 int cpu; 1470 1471 CPUMASK_ASSBMASK(mask, netisr_ncpus); 1472 CPUMASK_ANDMASK(mask, smp_active_mask); 1473 1474 if (IN_NETISR_NCPUS(mycpuid)) { 1475 ipfrag_drain_oncpu(&ipfrag_queue_pcpu[mycpuid]); 1476 CPUMASK_NANDBIT(mask, mycpuid); 1477 } 1478 1479 for (cpu = 0; cpu < netisr_ncpus; ++cpu) { 1480 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[cpu]; 1481 1482 if (!CPUMASK_TESTBIT(mask, cpu)) 1483 continue; 1484 1485 if (fragq->nipq == 0 || fragq->draining) { 1486 /* No fragments or is draining; skip this cpu. */ 1487 CPUMASK_NANDBIT(mask, cpu); 1488 continue; 1489 } 1490 fragq->draining = 1; 1491 } 1492 1493 if (CPUMASK_TESTNZERO(mask)) 1494 lwkt_send_ipiq_mask(mask, ipfrag_drain_ipi, NULL); 1495 } 1496 1497 void 1498 ip_drain(void) 1499 { 1500 ipfrag_drain(); 1501 in_rtqdrain(); 1502 } 1503 1504 /* 1505 * Do option processing on a datagram, 1506 * possibly discarding it if bad options are encountered, 1507 * or forwarding it if source-routed. 1508 * The pass argument is used when operating in the IPSTEALTH 1509 * mode to tell what options to process: 1510 * [LS]SRR (pass 0) or the others (pass 1). 1511 * The reason for as many as two passes is that when doing IPSTEALTH, 1512 * non-routing options should be processed only if the packet is for us. 1513 * Returns 1 if packet has been forwarded/freed, 1514 * 0 if the packet should be processed further. 1515 */ 1516 static int 1517 ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop) 1518 { 1519 struct sockaddr_in ipaddr = { sizeof ipaddr, AF_INET }; 1520 struct ip *ip = mtod(m, struct ip *); 1521 u_char *cp; 1522 struct in_ifaddr *ia; 1523 int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB; 1524 boolean_t forward = FALSE; 1525 struct in_addr *sin, dst; 1526 n_time ntime; 1527 1528 dst = ip->ip_dst; 1529 cp = (u_char *)(ip + 1); 1530 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip); 1531 for (; cnt > 0; cnt -= optlen, cp += optlen) { 1532 opt = cp[IPOPT_OPTVAL]; 1533 if (opt == IPOPT_EOL) 1534 break; 1535 if (opt == IPOPT_NOP) 1536 optlen = 1; 1537 else { 1538 if (cnt < IPOPT_OLEN + sizeof(*cp)) { 1539 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1540 goto bad; 1541 } 1542 optlen = cp[IPOPT_OLEN]; 1543 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { 1544 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1545 goto bad; 1546 } 1547 } 1548 switch (opt) { 1549 1550 default: 1551 break; 1552 1553 /* 1554 * Source routing with record. 1555 * Find interface with current destination address. 
1556 * If none on this machine then drop if strictly routed, 1557 * or do nothing if loosely routed. 1558 * Record interface address and bring up next address 1559 * component. If strictly routed make sure next 1560 * address is on directly accessible net. 1561 */ 1562 case IPOPT_LSRR: 1563 case IPOPT_SSRR: 1564 if (ipstealth && pass > 0) 1565 break; 1566 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1567 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1568 goto bad; 1569 } 1570 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1571 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1572 goto bad; 1573 } 1574 ipaddr.sin_addr = ip->ip_dst; 1575 ia = (struct in_ifaddr *) 1576 ifa_ifwithaddr((struct sockaddr *)&ipaddr); 1577 if (ia == NULL) { 1578 if (opt == IPOPT_SSRR) { 1579 type = ICMP_UNREACH; 1580 code = ICMP_UNREACH_SRCFAIL; 1581 goto bad; 1582 } 1583 if (!ip_dosourceroute) 1584 goto nosourcerouting; 1585 /* 1586 * Loose routing, and not at next destination 1587 * yet; nothing to do except forward. 1588 */ 1589 break; 1590 } 1591 off--; /* 0 origin */ 1592 if (off > optlen - (int)sizeof(struct in_addr)) { 1593 /* 1594 * End of source route. Should be for us. 1595 */ 1596 if (!ip_acceptsourceroute) 1597 goto nosourcerouting; 1598 save_rte(m, cp, ip->ip_src); 1599 break; 1600 } 1601 if (ipstealth) 1602 goto dropit; 1603 if (!ip_dosourceroute) { 1604 if (ipforwarding) { 1605 char sbuf[INET_ADDRSTRLEN]; 1606 char dbuf[INET_ADDRSTRLEN]; 1607 1608 /* 1609 * Acting as a router, so generate ICMP 1610 */ 1611 nosourcerouting: 1612 log(LOG_WARNING, 1613 "attempted source route from %s to %s\n", 1614 kinet_ntoa(ip->ip_src, sbuf), 1615 kinet_ntoa(ip->ip_dst, dbuf)); 1616 type = ICMP_UNREACH; 1617 code = ICMP_UNREACH_SRCFAIL; 1618 goto bad; 1619 } else { 1620 /* 1621 * Not acting as a router, 1622 * so silently drop. 1623 */ 1624 dropit: 1625 ipstat.ips_cantforward++; 1626 m_freem(m); 1627 return (1); 1628 } 1629 } 1630 1631 /* 1632 * locate outgoing interface 1633 */ 1634 memcpy(&ipaddr.sin_addr, cp + off, 1635 sizeof ipaddr.sin_addr); 1636 1637 if (opt == IPOPT_SSRR) { 1638 #define INA struct in_ifaddr * 1639 #define SA struct sockaddr * 1640 if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) 1641 == NULL) 1642 ia = (INA)ifa_ifwithnet((SA)&ipaddr); 1643 } else { 1644 ia = ip_rtaddr(ipaddr.sin_addr, NULL); 1645 } 1646 if (ia == NULL) { 1647 type = ICMP_UNREACH; 1648 code = ICMP_UNREACH_SRCFAIL; 1649 goto bad; 1650 } 1651 ip->ip_dst = ipaddr.sin_addr; 1652 memcpy(cp + off, &IA_SIN(ia)->sin_addr, 1653 sizeof(struct in_addr)); 1654 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1655 /* 1656 * Let ip_intr's mcast routing check handle mcast pkts 1657 */ 1658 forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr)); 1659 break; 1660 1661 case IPOPT_RR: 1662 if (ipstealth && pass == 0) 1663 break; 1664 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1665 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1666 goto bad; 1667 } 1668 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1669 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1670 goto bad; 1671 } 1672 /* 1673 * If no space remains, ignore. 1674 */ 1675 off--; /* 0 origin */ 1676 if (off > optlen - (int)sizeof(struct in_addr)) 1677 break; 1678 memcpy(&ipaddr.sin_addr, &ip->ip_dst, 1679 sizeof ipaddr.sin_addr); 1680 /* 1681 * locate outgoing interface; if we're the destination, 1682 * use the incoming interface (should be same). 
1683 */ 1684 if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == NULL && 1685 (ia = ip_rtaddr(ipaddr.sin_addr, NULL)) == NULL) { 1686 type = ICMP_UNREACH; 1687 code = ICMP_UNREACH_HOST; 1688 goto bad; 1689 } 1690 memcpy(cp + off, &IA_SIN(ia)->sin_addr, 1691 sizeof(struct in_addr)); 1692 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1693 break; 1694 1695 case IPOPT_TS: 1696 if (ipstealth && pass == 0) 1697 break; 1698 code = cp - (u_char *)ip; 1699 if (optlen < 4 || optlen > 40) { 1700 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1701 goto bad; 1702 } 1703 if ((off = cp[IPOPT_OFFSET]) < 5) { 1704 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1705 goto bad; 1706 } 1707 if (off > optlen - (int)sizeof(int32_t)) { 1708 cp[IPOPT_OFFSET + 1] += (1 << 4); 1709 if ((cp[IPOPT_OFFSET + 1] & 0xf0) == 0) { 1710 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1711 goto bad; 1712 } 1713 break; 1714 } 1715 off--; /* 0 origin */ 1716 sin = (struct in_addr *)(cp + off); 1717 switch (cp[IPOPT_OFFSET + 1] & 0x0f) { 1718 1719 case IPOPT_TS_TSONLY: 1720 break; 1721 1722 case IPOPT_TS_TSANDADDR: 1723 if (off + sizeof(n_time) + 1724 sizeof(struct in_addr) > optlen) { 1725 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1726 goto bad; 1727 } 1728 ipaddr.sin_addr = dst; 1729 ia = (INA)ifaof_ifpforaddr((SA)&ipaddr, 1730 m->m_pkthdr.rcvif); 1731 if (ia == NULL) 1732 continue; 1733 memcpy(sin, &IA_SIN(ia)->sin_addr, 1734 sizeof(struct in_addr)); 1735 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1736 off += sizeof(struct in_addr); 1737 break; 1738 1739 case IPOPT_TS_PRESPEC: 1740 if (off + sizeof(n_time) + 1741 sizeof(struct in_addr) > optlen) { 1742 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1743 goto bad; 1744 } 1745 memcpy(&ipaddr.sin_addr, sin, 1746 sizeof(struct in_addr)); 1747 if (ifa_ifwithaddr((SA)&ipaddr) == NULL) 1748 continue; 1749 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1750 off += sizeof(struct in_addr); 1751 break; 1752 1753 default: 1754 code = &cp[IPOPT_OFFSET + 1] - (u_char *)ip; 1755 goto bad; 1756 } 1757 ntime = iptime(); 1758 memcpy(cp + off, &ntime, sizeof(n_time)); 1759 cp[IPOPT_OFFSET] += sizeof(n_time); 1760 } 1761 } 1762 if (forward && ipforwarding) { 1763 ip_forward(m, TRUE, next_hop); 1764 return (1); 1765 } 1766 return (0); 1767 bad: 1768 icmp_error(m, type, code, 0, 0); 1769 ipstat.ips_badoptions++; 1770 return (1); 1771 } 1772 1773 /* 1774 * Given address of next destination (final or next hop), 1775 * return internet address info of interface to be used to get there. 1776 */ 1777 struct in_ifaddr * 1778 ip_rtaddr(struct in_addr dst, struct route *ro0) 1779 { 1780 struct route sro, *ro; 1781 struct sockaddr_in *sin; 1782 struct in_ifaddr *ia; 1783 1784 if (ro0 != NULL) { 1785 ro = ro0; 1786 } else { 1787 bzero(&sro, sizeof(sro)); 1788 ro = &sro; 1789 } 1790 1791 sin = (struct sockaddr_in *)&ro->ro_dst; 1792 1793 if (ro->ro_rt == NULL || dst.s_addr != sin->sin_addr.s_addr) { 1794 if (ro->ro_rt != NULL) { 1795 RTFREE(ro->ro_rt); 1796 ro->ro_rt = NULL; 1797 } 1798 sin->sin_family = AF_INET; 1799 sin->sin_len = sizeof *sin; 1800 sin->sin_addr = dst; 1801 rtalloc_ign(ro, RTF_PRCLONING); 1802 } 1803 1804 if (ro->ro_rt == NULL) 1805 return (NULL); 1806 1807 ia = ifatoia(ro->ro_rt->rt_ifa); 1808 1809 if (ro == &sro) 1810 RTFREE(ro->ro_rt); 1811 return ia; 1812 } 1813 1814 /* 1815 * Save incoming source route for use in replies, 1816 * to be picked up later by ip_srcroute if the receiver is interested. 
1817 */ 1818 static void 1819 save_rte(struct mbuf *m, u_char *option, struct in_addr dst) 1820 { 1821 struct m_tag *mtag; 1822 struct ip_srcrt_opt *opt; 1823 unsigned olen; 1824 1825 mtag = m_tag_get(PACKET_TAG_IPSRCRT, sizeof(*opt), M_NOWAIT); 1826 if (mtag == NULL) 1827 return; 1828 opt = m_tag_data(mtag); 1829 1830 olen = option[IPOPT_OLEN]; 1831 #ifdef DIAGNOSTIC 1832 if (ipprintfs) 1833 kprintf("save_rte: olen %d\n", olen); 1834 #endif 1835 if (olen > sizeof(opt->ip_srcrt) - (1 + sizeof(dst))) { 1836 m_tag_free(mtag); 1837 return; 1838 } 1839 bcopy(option, opt->ip_srcrt.srcopt, olen); 1840 opt->ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); 1841 opt->ip_srcrt.dst = dst; 1842 m_tag_prepend(m, mtag); 1843 } 1844 1845 /* 1846 * Retrieve incoming source route for use in replies, 1847 * in the same form used by setsockopt. 1848 * The first hop is placed before the options, will be removed later. 1849 */ 1850 struct mbuf * 1851 ip_srcroute(struct mbuf *m0) 1852 { 1853 struct in_addr *p, *q; 1854 struct mbuf *m; 1855 struct m_tag *mtag; 1856 struct ip_srcrt_opt *opt; 1857 1858 if (m0 == NULL) 1859 return NULL; 1860 1861 mtag = m_tag_find(m0, PACKET_TAG_IPSRCRT, NULL); 1862 if (mtag == NULL) 1863 return NULL; 1864 opt = m_tag_data(mtag); 1865 1866 if (opt->ip_nhops == 0) 1867 return (NULL); 1868 m = m_get(M_NOWAIT, MT_HEADER); 1869 if (m == NULL) 1870 return (NULL); 1871 1872 #define OPTSIZ (sizeof(opt->ip_srcrt.nop) + sizeof(opt->ip_srcrt.srcopt)) 1873 1874 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */ 1875 m->m_len = opt->ip_nhops * sizeof(struct in_addr) + 1876 sizeof(struct in_addr) + OPTSIZ; 1877 #ifdef DIAGNOSTIC 1878 if (ipprintfs) { 1879 kprintf("ip_srcroute: nhops %d mlen %d", 1880 opt->ip_nhops, m->m_len); 1881 } 1882 #endif 1883 1884 /* 1885 * First save first hop for return route 1886 */ 1887 p = &opt->ip_srcrt.route[opt->ip_nhops - 1]; 1888 *(mtod(m, struct in_addr *)) = *p--; 1889 #ifdef DIAGNOSTIC 1890 if (ipprintfs) 1891 kprintf(" hops %x", ntohl(mtod(m, struct in_addr *)->s_addr)); 1892 #endif 1893 1894 /* 1895 * Copy option fields and padding (nop) to mbuf. 1896 */ 1897 opt->ip_srcrt.nop = IPOPT_NOP; 1898 opt->ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF; 1899 memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &opt->ip_srcrt.nop, 1900 OPTSIZ); 1901 q = (struct in_addr *)(mtod(m, caddr_t) + 1902 sizeof(struct in_addr) + OPTSIZ); 1903 #undef OPTSIZ 1904 /* 1905 * Record return path as an IP source route, 1906 * reversing the path (pointers are now aligned). 1907 */ 1908 while (p >= opt->ip_srcrt.route) { 1909 #ifdef DIAGNOSTIC 1910 if (ipprintfs) 1911 kprintf(" %x", ntohl(q->s_addr)); 1912 #endif 1913 *q++ = *p--; 1914 } 1915 /* 1916 * Last hop goes to final destination. 1917 */ 1918 *q = opt->ip_srcrt.dst; 1919 m_tag_delete(m0, mtag); 1920 #ifdef DIAGNOSTIC 1921 if (ipprintfs) 1922 kprintf(" %x\n", ntohl(q->s_addr)); 1923 #endif 1924 return (m); 1925 } 1926 1927 /* 1928 * Strip out IP options. 
1929 */ 1930 void 1931 ip_stripoptions(struct mbuf *m) 1932 { 1933 int datalen; 1934 struct ip *ip = mtod(m, struct ip *); 1935 caddr_t opts; 1936 int optlen; 1937 1938 optlen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip); 1939 opts = (caddr_t)(ip + 1); 1940 datalen = m->m_len - (sizeof(struct ip) + optlen); 1941 bcopy(opts + optlen, opts, datalen); 1942 m->m_len -= optlen; 1943 if (m->m_flags & M_PKTHDR) 1944 m->m_pkthdr.len -= optlen; 1945 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2); 1946 } 1947 1948 u_char inetctlerrmap[PRC_NCMDS] = { 1949 0, 0, 0, 0, 1950 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, 1951 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, 1952 EMSGSIZE, EHOSTUNREACH, 0, 0, 1953 0, 0, 0, 0, 1954 ENOPROTOOPT, ECONNREFUSED 1955 }; 1956 1957 /* 1958 * Forward a packet. If some error occurs return the sender 1959 * an icmp packet. Note we can't always generate a meaningful 1960 * icmp message because icmp doesn't have a large enough repertoire 1961 * of codes and types. 1962 * 1963 * If not forwarding, just drop the packet. This could be confusing 1964 * if ipforwarding was zero but some routing protocol was advancing 1965 * us as a gateway to somewhere. However, we must let the routing 1966 * protocol deal with that. 1967 * 1968 * The using_srcrt parameter indicates whether the packet is being forwarded 1969 * via a source route. 1970 */ 1971 void 1972 ip_forward(struct mbuf *m, boolean_t using_srcrt, struct sockaddr_in *next_hop) 1973 { 1974 struct ip *ip = mtod(m, struct ip *); 1975 struct rtentry *rt; 1976 struct route fwd_ro; 1977 int error, type = 0, code = 0, destmtu = 0; 1978 struct mbuf *mcopy, *mtemp = NULL; 1979 n_long dest; 1980 struct in_addr pkt_dst; 1981 1982 dest = INADDR_ANY; 1983 /* 1984 * Cache the destination address of the packet; this may be 1985 * changed by use of 'ipfw fwd'. 1986 */ 1987 pkt_dst = (next_hop != NULL) ? next_hop->sin_addr : ip->ip_dst; 1988 1989 #ifdef DIAGNOSTIC 1990 if (ipprintfs) 1991 kprintf("forward: src %x dst %x ttl %x\n", 1992 ip->ip_src.s_addr, pkt_dst.s_addr, ip->ip_ttl); 1993 #endif 1994 1995 if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) { 1996 ipstat.ips_cantforward++; 1997 m_freem(m); 1998 return; 1999 } 2000 if (!ipstealth && ip->ip_ttl <= IPTTLDEC) { 2001 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0); 2002 return; 2003 } 2004 2005 bzero(&fwd_ro, sizeof(fwd_ro)); 2006 ip_rtaddr(pkt_dst, &fwd_ro); 2007 if (fwd_ro.ro_rt == NULL) { 2008 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0); 2009 return; 2010 } 2011 rt = fwd_ro.ro_rt; 2012 2013 if (curthread->td_type == TD_TYPE_NETISR) { 2014 /* 2015 * Save the IP header and at most 8 bytes of the payload, 2016 * in case we need to generate an ICMP message to the src. 2017 */ 2018 mtemp = ipforward_mtemp[mycpuid]; 2019 KASSERT((mtemp->m_flags & M_EXT) == 0 && 2020 mtemp->m_data == mtemp->m_pktdat && 2021 m_tag_first(mtemp) == NULL, 2022 ("ip_forward invalid mtemp1")); 2023 2024 if (!m_dup_pkthdr(mtemp, m, M_NOWAIT)) { 2025 /* 2026 * It's probably ok if the pkthdr dup fails (because 2027 * the deep copy of the tag chain failed), but for now 2028 * be conservative and just discard the copy since 2029 * code below may some day want the tags. 
/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The using_srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, boolean_t using_srcrt, struct sockaddr_in *next_hop)
{
        struct ip *ip = mtod(m, struct ip *);
        struct rtentry *rt;
        struct route fwd_ro;
        int error, type = 0, code = 0, destmtu = 0;
        struct mbuf *mcopy, *mtemp = NULL;
        n_long dest;
        struct in_addr pkt_dst;

        dest = INADDR_ANY;
        /*
         * Cache the destination address of the packet; this may be
         * changed by use of 'ipfw fwd'.
         */
        pkt_dst = (next_hop != NULL) ? next_hop->sin_addr : ip->ip_dst;

#ifdef DIAGNOSTIC
        if (ipprintfs)
                kprintf("forward: src %x dst %x ttl %x\n",
                    ip->ip_src.s_addr, pkt_dst.s_addr, ip->ip_ttl);
#endif

        if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
                ipstat.ips_cantforward++;
                m_freem(m);
                return;
        }
        if (!ipstealth && ip->ip_ttl <= IPTTLDEC) {
                icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
                return;
        }

        bzero(&fwd_ro, sizeof(fwd_ro));
        ip_rtaddr(pkt_dst, &fwd_ro);
        if (fwd_ro.ro_rt == NULL) {
                icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
                return;
        }
        rt = fwd_ro.ro_rt;

        if (curthread->td_type == TD_TYPE_NETISR) {
                /*
                 * Save the IP header and at most 8 bytes of the payload,
                 * in case we need to generate an ICMP message to the src.
                 */
                mtemp = ipforward_mtemp[mycpuid];
                KASSERT((mtemp->m_flags & M_EXT) == 0 &&
                    mtemp->m_data == mtemp->m_pktdat &&
                    m_tag_first(mtemp) == NULL,
                    ("ip_forward invalid mtemp1"));

                if (!m_dup_pkthdr(mtemp, m, M_NOWAIT)) {
                        /*
                         * It's probably ok if the pkthdr dup fails (because
                         * the deep copy of the tag chain failed), but for now
                         * be conservative and just discard the copy since
                         * code below may some day want the tags.
                         */
                        mtemp = NULL;
                } else {
                        mtemp->m_type = m->m_type;
                        mtemp->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
                            (int)ip->ip_len);
                        mtemp->m_pkthdr.len = mtemp->m_len;
                        m_copydata(m, 0, mtemp->m_len, mtod(mtemp, caddr_t));
                }
        }

        if (!ipstealth)
                ip->ip_ttl -= IPTTLDEC;

        /*
         * If forwarding a packet using the same interface that it came in on,
         * perhaps we should send a redirect to the sender to shortcut a hop.
         * Only send a redirect if the source is sending directly to us,
         * and if the packet was not source routed (or has any options).
         * Also, don't send a redirect if forwarding using a default route
         * or a route modified by a redirect.
         */
        if (rt->rt_ifp == m->m_pkthdr.rcvif &&
            !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
            satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
            ipsendredirects && !using_srcrt && next_hop == NULL) {
                u_long src = ntohl(ip->ip_src.s_addr);
                struct in_ifaddr *rt_ifa = (struct in_ifaddr *)rt->rt_ifa;

                if (rt_ifa != NULL &&
                    (src & rt_ifa->ia_subnetmask) == rt_ifa->ia_subnet) {
                        if (rt->rt_flags & RTF_GATEWAY)
                                dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
                        else
                                dest = pkt_dst.s_addr;
                        /*
                         * Router requirements say to only send
                         * host redirects.
                         */
                        type = ICMP_REDIRECT;
                        code = ICMP_REDIRECT_HOST;
#ifdef DIAGNOSTIC
                        if (ipprintfs)
                                kprintf("redirect (%d) to %x\n", code, dest);
#endif
                }
        }

        error = ip_output(m, NULL, &fwd_ro, IP_FORWARDING, NULL, NULL);
        if (error == 0) {
                ipstat.ips_forward++;
                if (type == 0) {
                        if (mtemp)
                                ipflow_create(&fwd_ro, mtemp);
                        goto done;
                }
                ipstat.ips_redirectsent++;
        } else {
                ipstat.ips_cantforward++;
        }

        if (mtemp == NULL)
                goto done;

        /*
         * Errors that do not require generating an ICMP message
         */
        switch (error) {
        case ENOBUFS:
                /*
                 * RFC 1812 (Requirements for IP Version 4 Routers) says a
                 * router should not generate ICMP_SOURCEQUENCH.
                 * Source quench could be a big problem under DoS attacks,
                 * or if the underlying interface is rate-limited.
                 * Those who need source quench packets may re-enable them
                 * via the net.inet.ip.sendsourcequench sysctl.
                 */
                if (!ip_sendsourcequench)
                        goto done;
                break;

        case EACCES:                    /* ipfw denied packet */
                goto done;
        }

        KASSERT((mtemp->m_flags & M_EXT) == 0 &&
            mtemp->m_data == mtemp->m_pktdat,
            ("ip_forward invalid mtemp2"));
        mcopy = m_copym(mtemp, 0, mtemp->m_len, M_NOWAIT);
        if (mcopy == NULL)
                goto done;

        /*
         * Send ICMP message.
         */
        switch (error) {
        case 0:                         /* forwarded, but need redirect */
                /* type, code set above */
                break;

        case ENETUNREACH:               /* shouldn't happen, checked above */
        case EHOSTUNREACH:
        case ENETDOWN:
        case EHOSTDOWN:
        default:
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_HOST;
                break;

        case EMSGSIZE:
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_NEEDFRAG;
#ifdef IPSEC
                /*
                 * If the packet is routed over an IPsec tunnel, tell the
                 * originator the tunnel MTU.
                 *      tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
                 * XXX quickhack!!!
                 */
                if (fwd_ro.ro_rt != NULL) {
                        struct secpolicy *sp = NULL;
                        int ipsecerror;
                        int ipsechdr;
                        struct route *ro;

                        sp = ipsec4_getpolicybyaddr(mcopy,
                            IPSEC_DIR_OUTBOUND,
                            IP_FORWARDING,
                            &ipsecerror);

                        if (sp == NULL)
                                destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
                        else {
                                /* count IPsec header size */
                                ipsechdr = ipsec4_hdrsiz(mcopy,
                                    IPSEC_DIR_OUTBOUND,
                                    NULL);

                                /*
                                 * find the correct route for outer IPv4
                                 * header, compute tunnel MTU.
                                 */
                                if (sp->req != NULL && sp->req->sav != NULL &&
                                    sp->req->sav->sah != NULL) {
                                        ro = &sp->req->sav->sah->sa_route;
                                        if (ro->ro_rt != NULL &&
                                            ro->ro_rt->rt_ifp != NULL) {
                                                destmtu =
                                                    ro->ro_rt->rt_ifp->if_mtu;
                                                destmtu -= ipsechdr;
                                        }
                                }

                                key_freesp(sp);
                        }
                }
#elif defined(FAST_IPSEC)
                /*
                 * If the packet is routed over an IPsec tunnel, tell the
                 * originator the tunnel MTU.
                 *      tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
                 * XXX quickhack!!!
                 */
                if (fwd_ro.ro_rt != NULL) {
                        struct secpolicy *sp = NULL;
                        int ipsecerror;
                        int ipsechdr;
                        struct route *ro;

                        sp = ipsec_getpolicybyaddr(mcopy,
                            IPSEC_DIR_OUTBOUND,
                            IP_FORWARDING,
                            &ipsecerror);

                        if (sp == NULL)
                                destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
                        else {
                                /* count IPsec header size */
                                ipsechdr = ipsec4_hdrsiz(mcopy,
                                    IPSEC_DIR_OUTBOUND,
                                    NULL);

                                /*
                                 * find the correct route for outer IPv4
                                 * header, compute tunnel MTU.
                                 */
                                if (sp->req != NULL &&
                                    sp->req->sav != NULL &&
                                    sp->req->sav->sah != NULL) {
                                        ro = &sp->req->sav->sah->sa_route;
                                        if (ro->ro_rt != NULL &&
                                            ro->ro_rt->rt_ifp != NULL) {
                                                destmtu =
                                                    ro->ro_rt->rt_ifp->if_mtu;
                                                destmtu -= ipsechdr;
                                        }
                                }

                                KEY_FREESP(&sp);
                        }
                }
#else /* !IPSEC && !FAST_IPSEC */
                if (fwd_ro.ro_rt != NULL)
                        destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
#endif /* IPSEC */
                ipstat.ips_cantfrag++;
                break;

        case ENOBUFS:
                type = ICMP_SOURCEQUENCH;
                code = 0;
                break;

        case EACCES:                    /* ipfw denied packet */
                panic("ip_forward EACCES should not reach");
        }
        icmp_error(mcopy, type, code, dest, destmtu);
done:
        if (mtemp != NULL)
                m_tag_delete_chain(mtemp);
        if (fwd_ro.ro_rt != NULL)
                RTFREE(fwd_ro.ro_rt);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
        if (inp->inp_socket->so_options & SO_TIMESTAMP) {
                struct timeval tv;

                microtime(&tv);
                *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
                    SCM_TIMESTAMP, SOL_SOCKET);
                if (*mp)
                        mp = &(*mp)->m_next;
        }
        if (inp->inp_flags & INP_RECVDSTADDR) {
                *mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
                    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
                if (*mp)
                        mp = &(*mp)->m_next;
        }
        if (inp->inp_flags & INP_RECVTTL) {
                *mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
                    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
                if (*mp)
                        mp = &(*mp)->m_next;
        }
#ifdef notyet
        /*
         * XXX
         * Moving these out of udp_input() made them even more broken
         * than they already were.
         */
        /* options were tossed already */
        if (inp->inp_flags & INP_RECVOPTS) {
                *mp = sbcreatecontrol((caddr_t) opts_deleted_above,
                    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
                if (*mp)
                        mp = &(*mp)->m_next;
        }
        /* ip_srcroute doesn't do what we want here, need to fix */
        if (inp->inp_flags & INP_RECVRETOPTS) {
                *mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
                    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
                if (*mp)
                        mp = &(*mp)->m_next;
        }
#endif
        if (inp->inp_flags & INP_RECVIF) {
                struct ifnet *ifp;
                struct sdlbuf {
                        struct sockaddr_dl sdl;
                        u_char pad[32];
                } sdlbuf;
                struct sockaddr_dl *sdp;
                struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

                if (((ifp = m->m_pkthdr.rcvif)) &&
                    ((ifp->if_index != 0) && (ifp->if_index <= if_index))) {
                        sdp = IF_LLSOCKADDR(ifp);
                        /*
                         * Change our mind and don't try to copy.
                         */
                        if ((sdp->sdl_family != AF_LINK) ||
                            (sdp->sdl_len > sizeof(sdlbuf))) {
                                goto makedummy;
                        }
                        bcopy(sdp, sdl2, sdp->sdl_len);
                } else {
makedummy:
                        sdl2->sdl_len =
                            offsetof(struct sockaddr_dl, sdl_data[0]);
                        sdl2->sdl_family = AF_LINK;
                        sdl2->sdl_index = 0;
                        sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
                }
                *mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
                    IP_RECVIF, IPPROTO_IP);
                if (*mp)
                        mp = &(*mp)->m_next;
        }
}

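/*
 * Added illustrative note (not part of the original sources): the control
 * messages built by ip_savecontrol() above are what a process sees from
 * recvmsg(2) once it has enabled the matching socket options.  A minimal
 * user-space sketch, assuming a hypothetical already-open datagram socket s
 * (this is not kernel code and is not compiled here):
 *
 *      int on = 1;
 *      char buf[2048], cbuf[512];
 *      struct iovec iov = { buf, sizeof(buf) };
 *      struct msghdr msg;
 *      struct cmsghdr *cm;
 *      u_char ttl;
 *
 *      setsockopt(s, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
 *      setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));
 *      setsockopt(s, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));
 *      setsockopt(s, IPPROTO_IP, IP_RECVIF, &on, sizeof(on));
 *
 *      bzero(&msg, sizeof(msg));
 *      msg.msg_iov = &iov;
 *      msg.msg_iovlen = 1;
 *      msg.msg_control = cbuf;
 *      msg.msg_controllen = sizeof(cbuf);
 *      if (recvmsg(s, &msg, 0) >= 0) {
 *              for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *                   cm = CMSG_NXTHDR(&msg, cm)) {
 *                      if (cm->cmsg_level == IPPROTO_IP &&
 *                          cm->cmsg_type == IP_RECVTTL)
 *                              ttl = *(u_char *)CMSG_DATA(cm);
 *              }
 *      }
 *
 * SCM_TIMESTAMP (SOL_SOCKET level) carries a struct timeval,
 * IP_RECVDSTADDR a struct in_addr and IP_RECVIF a struct sockaddr_dl,
 * matching the sbcreatecontrol() calls above.
 */
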
/*
 * XXX these routines are called from the upper part of the kernel.
 *
 * They could also be moved to ip_mroute.c, since all the RSVP
 * handling is done there already.
 */
int
ip_rsvp_init(struct socket *so)
{
        if (so->so_type != SOCK_RAW ||
            so->so_proto->pr_protocol != IPPROTO_RSVP)
                return EOPNOTSUPP;

        if (ip_rsvpd != NULL)
                return EADDRINUSE;

        ip_rsvpd = so;
        /*
         * This may seem silly, but we need to be sure we don't over-increment
         * the RSVP counter, in case something slips up.
         */
        if (!ip_rsvp_on) {
                ip_rsvp_on = 1;
                rsvp_on++;
        }

        return 0;
}

int
ip_rsvp_done(void)
{
        ip_rsvpd = NULL;
        /*
         * This may seem silly, but we need to be sure we don't over-decrement
         * the RSVP counter, in case something slips up.
         */
        if (ip_rsvp_on) {
                ip_rsvp_on = 0;
                rsvp_on--;
        }
        return 0;
}

int
rsvp_input(struct mbuf **mp, int *offp, int proto)
{
        struct mbuf *m = *mp;

        *mp = NULL;

        if (rsvp_input_p) {     /* call the real one if loaded */
                *mp = m;
                rsvp_input_p(mp, offp, proto);
                return(IPPROTO_DONE);
        }

        /*
         * Can still get packets with rsvp_on = 0 if there is a local member
         * of the group to which the RSVP packet is addressed.  But in this
         * case we want to throw the packet away.
         */
        if (!rsvp_on) {
                m_freem(m);
                return(IPPROTO_DONE);
        }

        if (ip_rsvpd != NULL) {
                *mp = m;
                rip_input(mp, offp, proto);
                return(IPPROTO_DONE);
        }
        /* Drop the packet */
        m_freem(m);
        return(IPPROTO_DONE);
}

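/*
 * Added illustrative note (not part of the original sources): rsvp_input()
 * above tries, in order, a loaded RSVP handler (rsvp_input_p), then raw
 * delivery to the registered daemon socket (ip_rsvpd) via rip_input() when
 * rsvp_on is set, and otherwise drops the packet.  In FreeBSD-derived
 * stacks the daemon conventionally registers itself with the IP_RSVP_ON
 * socket option on a raw IPPROTO_RSVP socket, which is assumed here (the
 * plumbing lives outside this file) to end up in ip_rsvp_init():
 *
 *      int s = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *      setsockopt(s, IPPROTO_IP, IP_RSVP_ON, NULL, 0);
 *
 * ip_rsvp_init() rejects anything that is not a raw IPPROTO_RSVP socket,
 * allows only one daemon at a time (EADDRINUSE) and bumps rsvp_on exactly
 * once; ip_rsvp_done() undoes that when the daemon goes away.
 */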