/*	$OpenBSD: ip_input.c,v 1.354 2021/01/15 15:18:12 bluhm Exp $	*/
/*	$NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include "pf.h"
#include "carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/task.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <net/if_types.h>

#ifdef INET6
#include <netinet6/ip6protosw.h>
#include <netinet6/ip6_var.h>
#endif

#if NPF > 0
#include <net/pfvar.h>
#endif

#ifdef MROUTING
#include <netinet/ip_mroute.h>
#endif

#ifdef IPSEC
#include <netinet/ip_ipsp.h>
#endif /* IPSEC */

#if NCARP > 0
#include <netinet/ip_carp.h>
#endif

/* values controllable via sysctl */
int	ipforwarding = 0;
int	ipmforwarding = 0;
int	ipmultipath = 0;
int	ipsendredirects = 1;
int	ip_dosourceroute = 0;
int	ip_defttl = IPDEFTTL;
int	ip_mtudisc = 1;
u_int	ip_mtudisc_timeout = IPMTUDISCTIMEOUT;
int	ip_directedbcast = 0;

struct rttimer_queue *ip_mtudisc_timeout_q = NULL;

/* Protects `ipq' and `ip_frags'. */
struct mutex	ipq_mutex = MUTEX_INITIALIZER(IPL_SOFTNET);

/* IP reassembly queue */
LIST_HEAD(, ipq) ipq;

/* Keep track of memory used for reassembly */
int	ip_maxqueue = 300;
int	ip_frags = 0;

#ifdef MROUTING
extern int ip_mrtproto;
#endif

const struct sysctl_bounded_args ipctl_vars[] = {
#ifdef MROUTING
	{ IPCTL_MRTPROTO, &ip_mrtproto, 1, 0 },
#endif
	{ IPCTL_FORWARDING, &ipforwarding, 0, 2 },
	{ IPCTL_SENDREDIRECTS, &ipsendredirects, 0, 1 },
	{ IPCTL_DEFTTL, &ip_defttl, 0, 255 },
	{ IPCTL_DIRECTEDBCAST, &ip_directedbcast, 0, 1 },
	{ IPCTL_IPPORT_FIRSTAUTO, &ipport_firstauto, 0, 65535 },
	{ IPCTL_IPPORT_LASTAUTO, &ipport_lastauto, 0, 65535 },
	{ IPCTL_IPPORT_HIFIRSTAUTO, &ipport_hifirstauto, 0, 65535 },
	{ IPCTL_IPPORT_HILASTAUTO, &ipport_hilastauto, 0, 65535 },
	{ IPCTL_IPPORT_MAXQUEUE, &ip_maxqueue, 0, 10000 },
	{ IPCTL_MFORWARDING, &ipmforwarding, 0, 1 },
	{ IPCTL_MULTIPATH, &ipmultipath, 0, 1 },
	{ IPCTL_ARPQUEUED, &la_hold_total, 0, 1000 },
	{ IPCTL_ARPTIMEOUT, &arpt_keep, 0, INT_MAX },
	{ IPCTL_ARPDOWN, &arpt_down, 0, INT_MAX },
};

struct pool ipqent_pool;
struct pool ipq_pool;

struct cpumem *ipcounters;

int ip_sysctl_ipstat(void *, size_t *, void *);

static struct mbuf_queue	ipsend_mq;

extern struct niqueue		arpinq;

int	ip_ours(struct mbuf **, int *, int, int);
int	ip_dooptions(struct mbuf *, struct ifnet *);
int	in_ouraddr(struct mbuf *, struct ifnet *, struct rtentry **);

static void ip_send_dispatch(void *);
static struct task ipsend_task = TASK_INITIALIZER(ip_send_dispatch, &ipsend_mq);
/*
 * Used to save the IP options in case a protocol wants to respond
 * to an incoming packet over the same route if the packet got here
 * using IP source routing.  This allows connection establishment and
 * maintenance when the remote end is on a network that is not known
 * to us.
 */
struct ip_srcrt {
	int		isr_nhops;		   /* number of hops */
	struct in_addr	isr_dst;		   /* final destination */
	char		isr_nop;		   /* one NOP to align */
	char		isr_hdr[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN & OFFSET */
	struct in_addr	isr_routes[MAX_IPOPTLEN/sizeof(struct in_addr)];
};

void save_rte(struct mbuf *, u_char *, struct in_addr);

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	const struct protosw *pr;
	int i;
	const u_int16_t defbaddynamicports_tcp[] = DEFBADDYNAMICPORTS_TCP;
	const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP;
	const u_int16_t defrootonlyports_tcp[] = DEFROOTONLYPORTS_TCP;
	const u_int16_t defrootonlyports_udp[] = DEFROOTONLYPORTS_UDP;

	ipcounters = counters_alloc(ips_ncounters);

	pool_init(&ipqent_pool, sizeof(struct ipqent), 0,
	    IPL_SOFTNET, 0, "ipqe", NULL);
	pool_init(&ipq_pool, sizeof(struct ipq), 0,
	    IPL_SOFTNET, 0, "ipq", NULL);

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW &&
		    pr->pr_protocol < IPPROTO_MAX)
			ip_protox[pr->pr_protocol] = pr - inetsw;
	LIST_INIT(&ipq);
	if (ip_mtudisc != 0)
		ip_mtudisc_timeout_q =
		    rt_timer_queue_create(ip_mtudisc_timeout);

	/* Fill in list of ports not to allocate dynamically. */
	memset(&baddynamicports, 0, sizeof(baddynamicports));
	for (i = 0; defbaddynamicports_tcp[i] != 0; i++)
		DP_SET(baddynamicports.tcp, defbaddynamicports_tcp[i]);
	for (i = 0; defbaddynamicports_udp[i] != 0; i++)
		DP_SET(baddynamicports.udp, defbaddynamicports_udp[i]);

	/* Fill in list of ports only root can bind to. */
	memset(&rootonlyports, 0, sizeof(rootonlyports));
	for (i = 0; defrootonlyports_tcp[i] != 0; i++)
		DP_SET(rootonlyports.tcp, defrootonlyports_tcp[i]);
	for (i = 0; defrootonlyports_udp[i] != 0; i++)
		DP_SET(rootonlyports.udp, defrootonlyports_udp[i]);

	mq_init(&ipsend_mq, 64, IPL_SOFTNET);

#ifdef IPSEC
	ipsec_init();
#endif
}

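/*
 * Receive path overview (descriptive note): ipv4_input() is invoked per
 * packet from the network interface layer.  It runs ip_input_if(), which
 * validates the header, runs the carp and pf hooks when those are compiled
 * in, and then either forwards the packet or hands it to ip_ours().
 * ip_ours() reassembles fragments and passes the complete datagram to
 * ip_deliver(), which dispatches through ip_protox[]/inetsw[] to the
 * upper-layer protocol input routine.
 */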
/*
 * IPv4 input routine.
 *
 * Checksum and byte swap header.  Process options.  Forward or deliver.
 */
void
ipv4_input(struct ifnet *ifp, struct mbuf *m)
{
	int off, nxt;

	off = 0;
	nxt = ip_input_if(&m, &off, IPPROTO_IPV4, AF_UNSPEC, ifp);
	KASSERT(nxt == IPPROTO_DONE);
}

int
ip_input_if(struct mbuf **mp, int *offp, int nxt, int af, struct ifnet *ifp)
{
	struct mbuf *m = *mp;
	struct rtentry *rt = NULL;
	struct ip *ip;
	int hlen, len;
	in_addr_t pfrdr = 0;

	KASSERT(*offp == 0);

	ipstat_inc(ips_total);
	if (m->m_len < sizeof (struct ip) &&
	    (m = *mp = m_pullup(m, sizeof (struct ip))) == NULL) {
		ipstat_inc(ips_toosmall);
		goto bad;
	}
	ip = mtod(m, struct ip *);
	if (ip->ip_v != IPVERSION) {
		ipstat_inc(ips_badvers);
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		ipstat_inc(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = *mp = m_pullup(m, hlen)) == NULL) {
			ipstat_inc(ips_badhlen);
			goto bad;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			ipstat_inc(ips_badaddr);
			goto bad;
		}
	}

	if ((m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_OK) == 0) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_BAD) {
			ipstat_inc(ips_badsum);
			goto bad;
		}

		ipstat_inc(ips_inswcsum);
		if (in_cksum(m, hlen) != 0) {
			ipstat_inc(ips_badsum);
			goto bad;
		}
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Convert fields to host representation.
	 */
	if (len < hlen) {
		ipstat_inc(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat_inc(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = len;
			m->m_pkthdr.len = len;
		} else
			m_adj(m, len - m->m_pkthdr.len);
	}

#if NCARP > 0
	if (carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr,
	    &ip->ip_dst.s_addr, (ip->ip_p == IPPROTO_ICMP ? 0 : 1)))
		goto bad;
#endif

#if NPF > 0
	/*
	 * Packet filter
	 */
	pfrdr = ip->ip_dst.s_addr;
	if (pf_test(AF_INET, PF_IN, ifp, mp) != PF_PASS)
		goto bad;
	m = *mp;
	if (m == NULL)
		goto bad;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
	pfrdr = (pfrdr != ip->ip_dst.s_addr);
#endif

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, ifp)) {
		m = *mp = NULL;
		goto bad;
	}

	if (ip->ip_dst.s_addr == INADDR_BROADCAST ||
	    ip->ip_dst.s_addr == INADDR_ANY) {
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

	switch (in_ouraddr(m, ifp, &rt)) {
	case 2:
		goto bad;
	case 1:
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

	if (IN_MULTICAST(ip->ip_dst.s_addr)) {
		/*
		 * Make sure M_MCAST is set.  It should theoretically
		 * already be there, but let's play safe because upper
		 * layers check for this flag.
		 */
		m->m_flags |= M_MCAST;

#ifdef MROUTING
		if (ipmforwarding && ip_mrouter[ifp->if_rdomain]) {
			int error;

			if (m->m_flags & M_EXT) {
				if ((m = *mp = m_pullup(m, hlen)) == NULL) {
					ipstat_inc(ips_toosmall);
					goto bad;
				}
				ip = mtod(m, struct ip *);
			}
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 *
			 * (The IP ident field is put in the same byte order
			 * as expected when ip_mforward() is called from
			 * ip_output().)
			 */
			KERNEL_LOCK();
			error = ip_mforward(m, ifp);
			KERNEL_UNLOCK();
			if (error) {
				ipstat_inc(ips_cantforward);
				goto bad;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP) {
				nxt = ip_ours(mp, offp, nxt, af);
				goto out;
			}
			ipstat_inc(ips_forward);
		}
#endif
		/*
		 * See if we belong to the destination multicast group on the
		 * arrival interface.
		 */
		if (!in_hasmulti(&ip->ip_dst, ifp)) {
			ipstat_inc(ips_notmember);
			if (!IN_LOCAL_GROUP(ip->ip_dst.s_addr))
				ipstat_inc(ips_cantforward);
			goto bad;
		}
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

#if NCARP > 0
	if (ip->ip_p == IPPROTO_ICMP &&
	    carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr,
	    &ip->ip_dst.s_addr, 1))
		goto bad;
#endif
	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (ipforwarding == 0) {
		ipstat_inc(ips_cantforward);
		goto bad;
	}
#ifdef IPSEC
	if (ipsec_in_use) {
		int rv;

		rv = ipsec_forward_check(m, hlen, AF_INET);
		if (rv != 0) {
			ipstat_inc(ips_cantforward);
			goto bad;
		}
		/*
		 * Fall through, forward packet. Outbound IPsec policy
		 * checking will occur in ip_output().
		 */
	}
#endif /* IPSEC */

	ip_forward(m, ifp, rt, pfrdr);
	*mp = NULL;
	return IPPROTO_DONE;
bad:
	nxt = IPPROTO_DONE;
	m_freemp(mp);
out:
	rtfree(rt);
	return nxt;
}

/*
 * IPv4 local-delivery routine.
 *
 * If fragmented try to reassemble.  Pass to next level.
 */
int
ip_ours(struct mbuf **mp, int *offp, int nxt, int af)
{
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct ipq *fp;
	struct ipqent *ipqe;
	int mff, hlen;

	hlen = ip->ip_hl << 2;

	/*
	 * If offset or IP_MF are set, must reassemble.
	 * Otherwise, nothing need be done.
	 * (We could look in the reassembly queue to see
	 * if the packet was previously fragmented,
	 * but it's not worth the time; just let them time out.)
	 */
	if (ip->ip_off &~ htons(IP_DF | IP_RF)) {
		if (m->m_flags & M_EXT) {	/* XXX */
			if ((m = *mp = m_pullup(m, hlen)) == NULL) {
				ipstat_inc(ips_toosmall);
				return IPPROTO_DONE;
			}
			ip = mtod(m, struct ip *);
		}

		mtx_enter(&ipq_mutex);

		/*
		 * Look for queue of fragments
		 * of this datagram.
		 */
		LIST_FOREACH(fp, &ipq, ipq_q) {
			if (ip->ip_id == fp->ipq_id &&
			    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
			    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
			    ip->ip_p == fp->ipq_p)
				break;
		}

		/*
		 * Adjust ip_len to not reflect header,
		 * set ipqe_mff if more fragments are expected,
		 * convert offset of this to bytes.
		 */
		ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
		mff = (ip->ip_off & htons(IP_MF)) != 0;
		if (mff) {
			/*
			 * Make sure that fragments have a data length
			 * that's a non-zero multiple of 8 bytes.
			 */
			if (ntohs(ip->ip_len) == 0 ||
			    (ntohs(ip->ip_len) & 0x7) != 0) {
				ipstat_inc(ips_badfrags);
				goto bad;
			}
		}
		ip->ip_off = htons(ntohs(ip->ip_off) << 3);

		/*
		 * If datagram marked as having more fragments
		 * or if this is not the first fragment,
		 * attempt reassembly; if it succeeds, proceed.
		 */
		if (mff || ip->ip_off) {
			ipstat_inc(ips_fragments);
			if (ip_frags + 1 > ip_maxqueue) {
				ip_flush();
				ipstat_inc(ips_rcvmemdrop);
				goto bad;
			}

			ipqe = pool_get(&ipqent_pool, PR_NOWAIT);
			if (ipqe == NULL) {
				ipstat_inc(ips_rcvmemdrop);
				goto bad;
			}
			ip_frags++;
			ipqe->ipqe_mff = mff;
			ipqe->ipqe_m = m;
			ipqe->ipqe_ip = ip;
			m = *mp = ip_reass(ipqe, fp);
			if (m == NULL)
				goto bad;
			ipstat_inc(ips_reassembled);
			ip = mtod(m, struct ip *);
			hlen = ip->ip_hl << 2;
			ip->ip_len = htons(ntohs(ip->ip_len) + hlen);
		} else
			if (fp)
				ip_freef(fp);

		mtx_leave(&ipq_mutex);
	}

	*offp = hlen;
	nxt = ip->ip_p;
	/* Check whether we are already in an IPv4/IPv6 local deliver loop. */
	if (af == AF_UNSPEC)
		nxt = ip_deliver(mp, offp, nxt, AF_INET);
	return nxt;
bad:
	mtx_leave(&ipq_mutex);
	m_freemp(mp);
	return IPPROTO_DONE;
}

#ifndef INET6
#define IPSTAT_INC(name)	ipstat_inc(ips_##name)
#else
#define IPSTAT_INC(name)	(af == AF_INET ?	\
    ipstat_inc(ips_##name) : ip6stat_inc(ip6s_##name))
#endif

int
ip_deliver(struct mbuf **mp, int *offp, int nxt, int af)
{
	const struct protosw *psw;
	int naf = af;
#ifdef INET6
	int nest = 0;
#endif /* INET6 */

	/* pf might have modified stuff, might have to chksum */
	switch (af) {
	case AF_INET:
		in_proto_cksum_out(*mp, NULL);
		break;
#ifdef INET6
	case AF_INET6:
		in6_proto_cksum_out(*mp, NULL);
		break;
#endif /* INET6 */
	}

	/*
	 * Tell launch routine the next header
	 */
	IPSTAT_INC(delivered);

	while (nxt != IPPROTO_DONE) {
#ifdef INET6
		if (af == AF_INET6 &&
		    ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
			ip6stat_inc(ip6s_toomanyhdr);
			goto bad;
		}
#endif /* INET6 */

		/*
		 * protection against faulty packet - there should be
		 * more sanity checks in header chain processing.
		 */
		if ((*mp)->m_pkthdr.len < *offp) {
			IPSTAT_INC(tooshort);
			goto bad;
		}

#ifdef IPSEC
		if (ipsec_in_use) {
			if (ipsec_local_check(*mp, *offp, nxt, af) != 0) {
				IPSTAT_INC(cantforward);
				goto bad;
			}
		}
		/* Otherwise, just fall through and deliver the packet */
#endif /* IPSEC */

		switch (nxt) {
		case IPPROTO_IPV4:
			naf = AF_INET;
			ipstat_inc(ips_delivered);
			break;
#ifdef INET6
		case IPPROTO_IPV6:
			naf = AF_INET6;
			ip6stat_inc(ip6s_delivered);
			break;
#endif /* INET6 */
		}
		switch (af) {
		case AF_INET:
			psw = &inetsw[ip_protox[nxt]];
			break;
#ifdef INET6
		case AF_INET6:
			psw = &inet6sw[ip6_protox[nxt]];
			break;
#endif /* INET6 */
		}
		nxt = (*psw->pr_input)(mp, offp, nxt, af);
		af = naf;
	}
	return nxt;
bad:
	m_freemp(mp);
	return IPPROTO_DONE;
}
#undef IPSTAT_INC

int
in_ouraddr(struct mbuf *m, struct ifnet *ifp, struct rtentry **prt)
{
	struct rtentry *rt;
	struct ip *ip;
	struct sockaddr_in sin;
	int match = 0;

#if NPF > 0
	switch (pf_ouraddr(m)) {
	case 0:
		return (0);
	case 1:
		return (1);
	default:
		/* pf does not know it */
		break;
	}
#endif

	ip = mtod(m, struct ip *);

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr = ip->ip_dst;
	rt = rtalloc_mpath(sintosa(&sin), &ip->ip_src.s_addr,
	    m->m_pkthdr.ph_rtableid);
	if (rtisvalid(rt)) {
		if (ISSET(rt->rt_flags, RTF_LOCAL))
			match = 1;

		/*
		 * If directedbcast is enabled we only consider it local
		 * if it is received on the interface with that address.
		 */
		if (ISSET(rt->rt_flags, RTF_BROADCAST) &&
		    (!ip_directedbcast || rt->rt_ifidx == ifp->if_index)) {
			match = 1;

			/* Make sure M_BCAST is set */
			m->m_flags |= M_BCAST;
		}
	}
	*prt = rt;

	if (!match) {
		struct ifaddr *ifa;

		/*
		 * No local address or broadcast address found, so check for
		 * ancient classful broadcast addresses.
		 * It must have been broadcast on the link layer, and for an
		 * address on the interface it was received on.
		 */
		if (!ISSET(m->m_flags, M_BCAST) ||
		    !IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, ip->ip_dst.s_addr))
			return (0);

		if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid))
			return (0);
		/*
		 * The check in the loop assumes you only rx a packet on an UP
		 * interface, and that M_BCAST will only be set on a BROADCAST
		 * interface.
		 */
		NET_ASSERT_LOCKED();
		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;

			if (IN_CLASSFULBROADCAST(ip->ip_dst.s_addr,
			    ifatoia(ifa)->ia_addr.sin_addr.s_addr)) {
				match = 1;
				break;
			}
		}
	} else if (ipforwarding == 0 && rt->rt_ifidx != ifp->if_index &&
	    !((ifp->if_flags & IFF_LOOPBACK) || (ifp->if_type == IFT_ENC) ||
	    (m->m_pkthdr.pf.flags & PF_TAG_TRANSLATE_LOCALHOST))) {
		/* received on wrong interface. */
#if NCARP > 0
		struct ifnet *out_if;

		/*
		 * Virtual IPs on carp interfaces need to be checked also
		 * against the parent interface and other carp interfaces
		 * sharing the same parent.
		 */
		out_if = if_get(rt->rt_ifidx);
		if (!(out_if && carp_strict_addr_chk(out_if, ifp))) {
			ipstat_inc(ips_wrongif);
			match = 2;
		}
		if_put(out_if);
#else
		ipstat_inc(ips_wrongif);
		match = 2;
#endif
	}

	return (match);
}

/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram.  If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 */
struct mbuf *
ip_reass(struct ipqent *ipqe, struct ipq *fp)
{
	struct mbuf *m = ipqe->ipqe_m;
	struct ipqent *nq, *p, *q;
	struct ip *ip;
	struct mbuf *t;
	int hlen = ipqe->ipqe_ip->ip_hl << 2;
	int i, next;
	u_int8_t ecn, ecn0;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = pool_get(&ipq_pool, PR_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
		LIST_INSERT_HEAD(&ipq, fp, ipq_q);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ipqe->ipqe_ip->ip_p;
		fp->ipq_id = ipqe->ipqe_ip->ip_id;
		LIST_INIT(&fp->ipq_fragq);
		fp->ipq_src = ipqe->ipqe_ip->ip_src;
		fp->ipq_dst = ipqe->ipqe_ip->ip_dst;
		p = NULL;
		goto insert;
	}

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ipqe->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos |=
			    IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
	    p = q, q = LIST_NEXT(q, ipqe_q))
		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
		    ntohs(ipqe->ipqe_ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ipqe->ipqe_ip->ip_len))
				goto dropfrag;
			m_adj(ipqe->ipqe_m, i);
			ipqe->ipqe_ip->ip_off =
			    htons(ntohs(ipqe->ipqe_ip->ip_off) + i);
			ipqe->ipqe_ip->ip_len =
			    htons(ntohs(ipqe->ipqe_ip->ip_len) - i);
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL &&
	    ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) >
	    ntohs(q->ipqe_ip->ip_off); q = nq) {
		i = (ntohs(ipqe->ipqe_ip->ip_off) +
		    ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
		if (i < ntohs(q->ipqe_ip->ip_len)) {
			q->ipqe_ip->ip_len =
			    htons(ntohs(q->ipqe_ip->ip_len) - i);
			q->ipqe_ip->ip_off =
			    htons(ntohs(q->ipqe_ip->ip_off) + i);
			m_adj(q->ipqe_m, i);
			break;
		}
		nq = LIST_NEXT(q, ipqe_q);
		m_freem(q->ipqe_m);
		LIST_REMOVE(q, ipqe_q);
		pool_put(&ipqent_pool, q);
		ip_frags--;
	}

insert:
	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 */
	if (p == NULL) {
		LIST_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
	} else {
		LIST_INSERT_AFTER(p, ipqe, ipqe_q);
	}
	next = 0;
	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
	    p = q, q = LIST_NEXT(q, ipqe_q)) {
		if (ntohs(q->ipqe_ip->ip_off) != next)
			return (0);
		next += ntohs(q->ipqe_ip->ip_len);
	}
	if (p->ipqe_mff)
		return (0);

	/*
	 * Reassembly is complete.  Check for a bogus message size and
	 * concatenate fragments.
	 */
	q = LIST_FIRST(&fp->ipq_fragq);
	ip = q->ipqe_ip;
	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
		ipstat_inc(ips_toolong);
		ip_freef(fp);
		return (0);
	}
	m = q->ipqe_m;
	t = m->m_next;
	m->m_next = 0;
	m_cat(m, t);
	nq = LIST_NEXT(q, ipqe_q);
	pool_put(&ipqent_pool, q);
	ip_frags--;
	for (q = nq; q != NULL; q = nq) {
		t = q->ipqe_m;
		nq = LIST_NEXT(q, ipqe_q);
		pool_put(&ipqent_pool, q);
		ip_frags--;
		m_removehdr(t);
		m_cat(m, t);
	}

	/*
	 * Create header for new ip packet by
	 * modifying header of first packet;
	 * dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons(next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	LIST_REMOVE(fp, ipq_q);
	pool_put(&ipq_pool, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	m_calchdrlen(m);
	return (m);

dropfrag:
	ipstat_inc(ips_fragdropped);
	m_freem(m);
	pool_put(&ipqent_pool, ipqe);
	ip_frags--;
	return (NULL);
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
ip_freef(struct ipq *fp)
{
	struct ipqent *q;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	while ((q = LIST_FIRST(&fp->ipq_fragq)) != NULL) {
		LIST_REMOVE(q, ipqe_q);
		m_freem(q->ipqe_m);
		pool_put(&ipqent_pool, q);
		ip_frags--;
	}
	LIST_REMOVE(fp, ipq_q);
	pool_put(&ipq_pool, fp);
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
void
ip_slowtimo(void)
{
	struct ipq *fp, *nfp;

	mtx_enter(&ipq_mutex);
	LIST_FOREACH_SAFE(fp, &ipq, ipq_q, nfp) {
		if (--fp->ipq_ttl == 0) {
			ipstat_inc(ips_fragtimeout);
			ip_freef(fp);
		}
	}
	mtx_leave(&ipq_mutex);
}

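/*
 * Descriptive note: ip_flush() is called from ip_ours() with ipq_mutex held
 * when accepting another fragment would push ip_frags past the ip_maxqueue
 * limit.  It drops whole fragment queues from the head of the `ipq' list,
 * at most 50 per call, until usage falls below 75% of the limit.
 */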
/*
 * Flush a bunch of datagram fragments, till we are down to 75%.
 */
void
ip_flush(void)
{
	int max = 50;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	while (!LIST_EMPTY(&ipq) && ip_frags > ip_maxqueue * 3 / 4 && --max) {
		ipstat_inc(ips_fragdropped);
		ip_freef(LIST_FIRST(&ipq));
	}
}

/*
 * Do option processing on a datagram,
 * possibly discarding it if bad options are encountered,
 * or forwarding it if source-routed.
 * Returns 1 if packet has been forwarded/freed,
 * 0 if the packet should be processed further.
 */
int
ip_dooptions(struct mbuf *m, struct ifnet *ifp)
{
	struct ip *ip = mtod(m, struct ip *);
	unsigned int rtableid = m->m_pkthdr.ph_rtableid;
	struct rtentry *rt;
	struct sockaddr_in ipaddr;
	u_char *cp;
	struct ip_timestamp ipt;
	struct in_ifaddr *ia;
	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
	struct in_addr sin, dst;
	u_int32_t ntime;

	dst = ip->ip_dst;
	cp = (u_char *)(ip + 1);
	cnt = (ip->ip_hl << 2) - sizeof (struct ip);

	KERNEL_LOCK();
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
		}

		switch (opt) {

		default:
			break;

		/*
		 * Source routing with record.
		 * Find interface with current destination address.
		 * If none on this machine then drop if strictly routed,
		 * or do nothing if loosely routed.
		 * Record interface address and bring up next address
		 * component.  If strictly routed make sure next
		 * address is on directly accessible net.
		 */
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (!ip_dosourceroute) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_SRCFAIL;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			memset(&ipaddr, 0, sizeof(ipaddr));
			ipaddr.sin_family = AF_INET;
			ipaddr.sin_len = sizeof(ipaddr);
			ipaddr.sin_addr = ip->ip_dst;
			ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr),
			    m->m_pkthdr.ph_rtableid));
			if (ia == NULL) {
				if (opt == IPOPT_SSRR) {
					type = ICMP_UNREACH;
					code = ICMP_UNREACH_SRCFAIL;
					goto bad;
				}
				/*
				 * Loose routing, and not at next destination
				 * yet; nothing to do except forward.
				 */
				break;
			}
			off--;			/* 0 origin */
			if ((off + sizeof(struct in_addr)) > optlen) {
				/*
				 * End of source route.  Should be for us.
				 */
				save_rte(m, cp, ip->ip_src);
				break;
			}

			/*
			 * locate outgoing interface
			 */
			memset(&ipaddr, 0, sizeof(ipaddr));
			ipaddr.sin_family = AF_INET;
			ipaddr.sin_len = sizeof(ipaddr);
			memcpy(&ipaddr.sin_addr, cp + off,
			    sizeof(ipaddr.sin_addr));
			/* keep packet in the virtual instance */
			rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
			if (!rtisvalid(rt) || ((opt == IPOPT_SSRR) &&
			    ISSET(rt->rt_flags, RTF_GATEWAY))) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_SRCFAIL;
				rtfree(rt);
				goto bad;
			}
			ia = ifatoia(rt->rt_ifa);
			memcpy(cp + off, &ia->ia_addr.sin_addr,
			    sizeof(struct in_addr));
			rtfree(rt);
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			ip->ip_dst = ipaddr.sin_addr;
			/*
			 * Let ip_intr's mcast routing check handle mcast pkts
			 */
			forward = !IN_MULTICAST(ip->ip_dst.s_addr);
			break;

		case IPOPT_RR:
			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}

			/*
			 * If no space remains, ignore.
			 */
			off--;			/* 0 origin */
			if ((off + sizeof(struct in_addr)) > optlen)
				break;
			memset(&ipaddr, 0, sizeof(ipaddr));
			ipaddr.sin_family = AF_INET;
			ipaddr.sin_len = sizeof(ipaddr);
			ipaddr.sin_addr = ip->ip_dst;
			/*
			 * locate outgoing interface; if we're the destination,
			 * use the incoming interface (should be same).
			 * Again keep the packet inside the virtual instance.
			 */
			rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
			if (!rtisvalid(rt)) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_HOST;
				rtfree(rt);
				goto bad;
			}
			ia = ifatoia(rt->rt_ifa);
			memcpy(cp + off, &ia->ia_addr.sin_addr,
			    sizeof(struct in_addr));
			rtfree(rt);
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			break;

		case IPOPT_TS:
			code = cp - (u_char *)ip;
			if (optlen < sizeof(struct ip_timestamp))
				goto bad;
			memcpy(&ipt, cp, sizeof(struct ip_timestamp));
			if (ipt.ipt_ptr < 5 || ipt.ipt_len < 5)
				goto bad;
			if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) > ipt.ipt_len) {
				if (++ipt.ipt_oflw == 0)
					goto bad;
				break;
			}
			memcpy(&sin, cp + ipt.ipt_ptr - 1, sizeof sin);
			switch (ipt.ipt_flg) {

			case IPOPT_TS_TSONLY:
				break;

			case IPOPT_TS_TSANDADDR:
				if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
				    sizeof(struct in_addr) > ipt.ipt_len)
					goto bad;
				memset(&ipaddr, 0, sizeof(ipaddr));
				ipaddr.sin_family = AF_INET;
				ipaddr.sin_len = sizeof(ipaddr);
				ipaddr.sin_addr = dst;
				ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr),
				    ifp));
				if (ia == NULL)
					continue;
				memcpy(&sin, &ia->ia_addr.sin_addr,
				    sizeof(struct in_addr));
				ipt.ipt_ptr += sizeof(struct in_addr);
				break;

			case IPOPT_TS_PRESPEC:
				if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
				    sizeof(struct in_addr) > ipt.ipt_len)
					goto bad;
				memset(&ipaddr, 0, sizeof(ipaddr));
				ipaddr.sin_family = AF_INET;
				ipaddr.sin_len = sizeof(ipaddr);
				ipaddr.sin_addr = sin;
				if (ifa_ifwithaddr(sintosa(&ipaddr),
				    m->m_pkthdr.ph_rtableid) == NULL)
					continue;
				ipt.ipt_ptr += sizeof(struct in_addr);
				break;

			default:
				/* XXX can't take &ipt->ipt_flg */
				code = (u_char *)&ipt.ipt_ptr -
				    (u_char *)ip + 1;
				goto bad;
			}
			ntime = iptime();
			memcpy(cp + ipt.ipt_ptr - 1, &ntime, sizeof(u_int32_t));
			ipt.ipt_ptr += sizeof(u_int32_t);
		}
	}
	KERNEL_UNLOCK();
	if (forward && ipforwarding > 0) {
		ip_forward(m, ifp, NULL, 1);
		return (1);
	}
	return (0);
bad:
	KERNEL_UNLOCK();
	icmp_error(m, type, code, 0, 0);
	ipstat_inc(ips_badoptions);
	return (1);
}

/*
 * Save incoming source route for use in replies,
 * to be picked up later by ip_srcroute if the receiver is interested.
 */
void
save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
{
	struct ip_srcrt *isr;
	struct m_tag *mtag;
	unsigned olen;

	olen = option[IPOPT_OLEN];
	if (olen > sizeof(isr->isr_hdr) + sizeof(isr->isr_routes))
		return;

	mtag = m_tag_get(PACKET_TAG_SRCROUTE, sizeof(*isr), M_NOWAIT);
	if (mtag == NULL)
		return;
	isr = (struct ip_srcrt *)(mtag + 1);

	memcpy(isr->isr_hdr, option, olen);
	isr->isr_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
	isr->isr_dst = dst;
	m_tag_prepend(m, mtag);
}

/*
 * Retrieve incoming source route for use in replies,
 * in the same form used by setsockopt.
 * The first hop is placed before the options, will be removed later.
 */
struct mbuf *
ip_srcroute(struct mbuf *m0)
{
	struct in_addr *p, *q;
	struct mbuf *m;
	struct ip_srcrt *isr;
	struct m_tag *mtag;

	if (!ip_dosourceroute)
		return (NULL);

	mtag = m_tag_find(m0, PACKET_TAG_SRCROUTE, NULL);
	if (mtag == NULL)
		return (NULL);
	isr = (struct ip_srcrt *)(mtag + 1);

	if (isr->isr_nhops == 0)
		return (NULL);
	m = m_get(M_DONTWAIT, MT_SOOPTS);
	if (m == NULL)
		return (NULL);

#define OPTSIZ	(sizeof(isr->isr_nop) + sizeof(isr->isr_hdr))

	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + header) */
	m->m_len = (isr->isr_nhops + 1) * sizeof(struct in_addr) + OPTSIZ;

	/*
	 * First save first hop for return route
	 */
	p = &(isr->isr_routes[isr->isr_nhops - 1]);
	*(mtod(m, struct in_addr *)) = *p--;

	/*
	 * Copy option fields and padding (nop) to mbuf.
	 */
	isr->isr_nop = IPOPT_NOP;
	isr->isr_hdr[IPOPT_OFFSET] = IPOPT_MINOFF;
	memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &isr->isr_nop,
	    OPTSIZ);
	q = (struct in_addr *)(mtod(m, caddr_t) +
	    sizeof(struct in_addr) + OPTSIZ);
#undef OPTSIZ
	/*
	 * Record return path as an IP source route,
	 * reversing the path (pointers are now aligned).
	 */
	while (p >= isr->isr_routes) {
		*q++ = *p--;
	}
	/*
	 * Last hop goes to final destination.
	 */
	*q = isr->isr_dst;
	m_tag_delete(m0, (struct m_tag *)isr);
	return (m);
}

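/*
 * Descriptive note: ip_stripoptions() removes the options by shifting the
 * remaining data within the first mbuf (memmove over m_len), so the IP
 * header and options are expected to be contiguous in that mbuf when it
 * is called.
 */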
/*
 * Strip out IP options, at higher level protocol in the kernel.
 */
void
ip_stripoptions(struct mbuf *m)
{
	int i;
	struct ip *ip = mtod(m, struct ip *);
	caddr_t opts;
	int olen;

	olen = (ip->ip_hl<<2) - sizeof (struct ip);
	opts = (caddr_t)(ip + 1);
	i = m->m_len - (sizeof (struct ip) + olen);
	memmove(opts, opts + olen, i);
	m->m_len -= olen;
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len -= olen;
	ip->ip_hl = sizeof(struct ip) >> 2;
	ip->ip_len = htons(ntohs(ip->ip_len) - olen);
}

const u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		0,		0,
	ENOPROTOOPT
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, struct ifnet *ifp, struct rtentry *rt, int srcrt)
{
	struct mbuf mfake, *mcopy = NULL;
	struct ip *ip = mtod(m, struct ip *);
	struct sockaddr_in *sin;
	struct route ro;
	int error, type = 0, code = 0, destmtu = 0, fake = 0, len;
	u_int32_t dest;

	dest = 0;
	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		ipstat_inc(ips_cantforward);
		m_freem(m);
		goto freecopy;
	}
	if (ip->ip_ttl <= IPTTLDEC) {
		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
		goto freecopy;
	}

	memset(&ro, 0, sizeof(ro));
	sin = satosin(&ro.ro_dst);
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = ip->ip_dst;

	if (!rtisvalid(rt)) {
		rtfree(rt);
		rt = rtalloc_mpath(sintosa(sin), &ip->ip_src.s_addr,
		    m->m_pkthdr.ph_rtableid);
		if (rt == NULL) {
			ipstat_inc(ips_noroute);
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
			return;
		}
	}

	/*
	 * Save at most 68 bytes of the packet in case
	 * we need to generate an ICMP message to the src.
	 * The data is saved in the mbuf on the stack that
	 * acts as a temporary storage not intended to be
	 * passed down the IP stack or to the mfree.
	 */
	memset(&mfake.m_hdr, 0, sizeof(mfake.m_hdr));
	mfake.m_type = m->m_type;
	if (m_dup_pkthdr(&mfake, m, M_DONTWAIT) == 0) {
		mfake.m_data = mfake.m_pktdat;
		len = min(ntohs(ip->ip_len), 68);
		m_copydata(m, 0, len, mfake.m_pktdat);
		mfake.m_pkthdr.len = mfake.m_len = len;
#if NPF > 0
		pf_pkt_addr_changed(&mfake);
#endif	/* NPF > 0 */
		fake = 1;
	}

	ip->ip_ttl -= IPTTLDEC;

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 * Don't send redirect if we advertise destination's arp address
	 * as ours (proxy arp).
	 */
	if ((rt->rt_ifidx == ifp->if_index) &&
	    (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
	    satosin(rt_key(rt))->sin_addr.s_addr != 0 &&
	    ipsendredirects && !srcrt &&
	    !arpproxy(satosin(rt_key(rt))->sin_addr, m->m_pkthdr.ph_rtableid)) {
		if ((ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_netmask) ==
		    ifatoia(rt->rt_ifa)->ia_net) {
			if (rt->rt_flags & RTF_GATEWAY)
				dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
			else
				dest = ip->ip_dst.s_addr;
			/* Router requirements says to only send host redirects */
			type = ICMP_REDIRECT;
			code = ICMP_REDIRECT_HOST;
		}
	}

	ro.ro_rt = rt;
	ro.ro_tableid = m->m_pkthdr.ph_rtableid;
	error = ip_output(m, NULL, &ro,
	    (IP_FORWARDING | (ip_directedbcast ? IP_ALLOWBROADCAST : 0)),
	    NULL, NULL, 0);
	rt = ro.ro_rt;
	if (error)
		ipstat_inc(ips_cantforward);
	else {
		ipstat_inc(ips_forward);
		if (type)
			ipstat_inc(ips_redirectsent);
		else
			goto freecopy;
	}
	if (!fake)
		goto freecopy;

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:		/* shouldn't happen, checked above */
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		if (rt != NULL) {
			if (rt->rt_mtu)
				destmtu = rt->rt_mtu;
			else {
				struct ifnet *destifp;

				destifp = if_get(rt->rt_ifidx);
				if (destifp != NULL)
					destmtu = destifp->if_mtu;
				if_put(destifp);
			}
		}
#endif /*IPSEC*/
		ipstat_inc(ips_cantfrag);
		break;

	case EACCES:
		/*
		 * pf(4) blocked the packet.  There is no need to send an ICMP
		 * packet back since pf(4) takes care of it.
		 */
		goto freecopy;
	case ENOBUFS:
		/*
		 * a router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * source quench could be a big problem under DoS attacks,
		 * or the underlying interface is rate-limited.
		 */
		goto freecopy;
	}

	mcopy = m_copym(&mfake, 0, len, M_DONTWAIT);
	if (mcopy)
		icmp_error(mcopy, type, code, dest, destmtu);

freecopy:
	if (fake)
		m_tag_delete_chain(&mfake);
	rtfree(rt);
}

int
ip_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int error;
#ifdef MROUTING
	extern struct mrtstat mrtstat;
#endif

	/* Almost all sysctl names at this level are terminal. */
	if (namelen != 1 && name[0] != IPCTL_IFQUEUE &&
	    name[0] != IPCTL_ARPQUEUE)
		return (ENOTDIR);

	switch (name[0]) {
	case IPCTL_SOURCEROUTE:
		/*
		 * Don't allow this to change in a secure environment.
		 */
		if (newp && securelevel > 0)
			return (EPERM);
		NET_LOCK();
		error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &ip_dosourceroute);
		NET_UNLOCK();
		return (error);
	case IPCTL_MTUDISC:
		NET_LOCK();
		error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &ip_mtudisc);
		if (ip_mtudisc != 0 && ip_mtudisc_timeout_q == NULL) {
			ip_mtudisc_timeout_q =
			    rt_timer_queue_create(ip_mtudisc_timeout);
		} else if (ip_mtudisc == 0 && ip_mtudisc_timeout_q != NULL) {
			rt_timer_queue_destroy(ip_mtudisc_timeout_q);
			ip_mtudisc_timeout_q = NULL;
		}
		NET_UNLOCK();
		return error;
	case IPCTL_MTUDISCTIMEOUT:
		NET_LOCK();
		error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &ip_mtudisc_timeout);
		if (ip_mtudisc_timeout_q != NULL)
			rt_timer_queue_change(ip_mtudisc_timeout_q,
			    ip_mtudisc_timeout);
		NET_UNLOCK();
		return (error);
#ifdef IPSEC
	case IPCTL_ENCDEBUG:
	case IPCTL_IPSEC_STATS:
	case IPCTL_IPSEC_EXPIRE_ACQUIRE:
	case IPCTL_IPSEC_EMBRYONIC_SA_TIMEOUT:
	case IPCTL_IPSEC_REQUIRE_PFS:
	case IPCTL_IPSEC_SOFT_ALLOCATIONS:
	case IPCTL_IPSEC_ALLOCATIONS:
	case IPCTL_IPSEC_SOFT_BYTES:
	case IPCTL_IPSEC_BYTES:
	case IPCTL_IPSEC_TIMEOUT:
	case IPCTL_IPSEC_SOFT_TIMEOUT:
	case IPCTL_IPSEC_SOFT_FIRSTUSE:
	case IPCTL_IPSEC_FIRSTUSE:
	case IPCTL_IPSEC_ENC_ALGORITHM:
	case IPCTL_IPSEC_AUTH_ALGORITHM:
	case IPCTL_IPSEC_IPCOMP_ALGORITHM:
		return (ipsec_sysctl(name, namelen, oldp, oldlenp, newp,
		    newlen));
#endif
	case IPCTL_IFQUEUE:
		return (EOPNOTSUPP);
	case IPCTL_ARPQUEUE:
		return (sysctl_niq(name + 1, namelen - 1,
		    oldp, oldlenp, newp, newlen, &arpinq));
	case IPCTL_STATS:
		return (ip_sysctl_ipstat(oldp, oldlenp, newp));
#ifdef MROUTING
	case IPCTL_MRTSTATS:
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &mrtstat, sizeof(mrtstat)));
	case IPCTL_MRTMFC:
		if (newp)
			return (EPERM);
		NET_LOCK();
		error = mrt_sysctl_mfc(oldp, oldlenp);
		NET_UNLOCK();
		return (error);
	case IPCTL_MRTVIF:
		if (newp)
			return (EPERM);
		NET_LOCK();
		error = mrt_sysctl_vif(oldp, oldlenp);
		NET_UNLOCK();
		return (error);
#else
	case IPCTL_MRTPROTO:
	case IPCTL_MRTSTATS:
	case IPCTL_MRTMFC:
	case IPCTL_MRTVIF:
		return (EOPNOTSUPP);
#endif
	default:
		NET_LOCK();
		error = sysctl_bounded_arr(ipctl_vars, nitems(ipctl_vars),
		    name, namelen, oldp, oldlenp, newp, newlen);
		NET_UNLOCK();
		return (error);
	}
	/* NOTREACHED */
}

int
ip_sysctl_ipstat(void *oldp, size_t *oldlenp, void *newp)
{
	uint64_t counters[ips_ncounters];
	struct ipstat ipstat;
	u_long *words = (u_long *)&ipstat;
	int i;

	CTASSERT(sizeof(ipstat) == (nitems(counters) * sizeof(u_long)));
	memset(&ipstat, 0, sizeof ipstat);
	counters_read(ipcounters, counters, nitems(counters));

	for (i = 0; i < nitems(counters); i++)
		words[i] = (u_long)counters[i];

	return (sysctl_rdstruct(oldp, oldlenp, newp, &ipstat, sizeof(ipstat)));
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
		struct timeval tv;

		m_microtime(m, &tv);
		*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}

	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* this code is broken and will probably never be fixed. */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct sockaddr_dl sdl;
		struct ifnet *ifp;

		ifp = if_get(m->m_pkthdr.ph_ifidx);
		if (ifp == NULL || ifp->if_sadl == NULL) {
			memset(&sdl, 0, sizeof(sdl));
			sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl.sdl_family = AF_LINK;
			sdl.sdl_index = ifp != NULL ? ifp->if_index : 0;
			sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0;
			*mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len,
			    IP_RECVIF, IPPROTO_IP);
		} else {
			*mp = sbcreatecontrol((caddr_t) ifp->if_sadl,
			    ifp->if_sadl->sdl_len, IP_RECVIF, IPPROTO_IP);
		}
		if (*mp)
			mp = &(*mp)->m_next;
		if_put(ifp);
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
		    sizeof(u_int8_t), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVRTABLE) {
		u_int rtableid = inp->inp_rtableid;

#if NPF > 0
		if (m && m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			struct pf_divert *divert;

			divert = pf_find_divert(m);
			KASSERT(divert != NULL);
			rtableid = divert->rdomain;
		}
#endif

		*mp = sbcreatecontrol((caddr_t) &rtableid,
		    sizeof(u_int), IP_RECVRTABLE, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

void
ip_send_dispatch(void *xmq)
{
	struct mbuf_queue *mq = xmq;
	struct mbuf *m;
	struct mbuf_list ml;

	mq_delist(mq, &ml);
	if (ml_empty(&ml))
		return;

	NET_LOCK();
	while ((m = ml_dequeue(&ml)) != NULL) {
		ip_output(m, NULL, NULL, 0, NULL, NULL, 0);
	}
	NET_UNLOCK();
}

void
ip_send(struct mbuf *m)
{
	mq_enqueue(&ipsend_mq, m);
	task_add(net_tq(0), &ipsend_task);
}