/*	$OpenBSD: ip_input.c,v 1.336 2017/12/29 17:05:25 bluhm Exp $	*/
/*	$NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include "pf.h"
#include "carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/task.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet6/ip6protosw.h>
#include <netinet6/ip6_var.h>
#endif

#if NPF > 0
#include <net/pfvar.h>
#endif

#ifdef MROUTING
#include <netinet/ip_mroute.h>
#endif

#ifdef IPSEC
#include <netinet/ip_ipsp.h>
#endif /* IPSEC */

#if NCARP > 0
#include <net/if_types.h>
#include <netinet/ip_carp.h>
#endif

/* values controllable via sysctl */
int ipforwarding = 0;
int ipmforwarding = 0;
int ipmultipath = 0;
int ipsendredirects = 1;
int ip_dosourceroute = 0;
int ip_defttl = IPDEFTTL;
int ip_mtudisc = 1;
u_int ip_mtudisc_timeout = IPMTUDISCTIMEOUT;
int ip_directedbcast = 0;

struct rttimer_queue *ip_mtudisc_timeout_q = NULL;

/* Protects `ipq' and `ip_frags'. */
struct mutex ipq_mutex = MUTEX_INITIALIZER(IPL_SOFTNET);

/* IP reassembly queue */
LIST_HEAD(, ipq) ipq;

/* Keep track of memory used for reassembly */
int ip_maxqueue = 300;
int ip_frags = 0;

int *ipctl_vars[IPCTL_MAXID] = IPCTL_VARS;

struct niqueue ipintrq = NIQUEUE_INITIALIZER(IPQ_MAXLEN, NETISR_IP);

struct pool ipqent_pool;
struct pool ipq_pool;

struct cpumem *ipcounters;

int ip_sysctl_ipstat(void *, size_t *, void *);

static struct mbuf_queue ipsend_mq;

int ip_ours(struct mbuf **, int *, int, int);
int ip_local(struct mbuf **, int *, int, int);
int ip_dooptions(struct mbuf *, struct ifnet *);
int in_ouraddr(struct mbuf *, struct ifnet *, struct rtentry **);

static void ip_send_dispatch(void *);
static struct task ipsend_task = TASK_INITIALIZER(ip_send_dispatch, &ipsend_mq);

/*
 * Used to save the IP options in case a protocol wants to respond
 * to an incoming packet over the same route if the packet got here
 * using IP source routing.  This allows connection establishment and
 * maintenance when the remote end is on a network that is not known
 * to us.
 */
struct ip_srcrt {
    int             isr_nhops;                 /* number of hops */
    struct in_addr  isr_dst;                   /* final destination */
    char            isr_nop;                   /* one NOP to align */
    char            isr_hdr[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN & OFFSET */
    struct in_addr  isr_routes[MAX_IPOPTLEN/sizeof(struct in_addr)];
};

void save_rte(struct mbuf *, u_char *, struct in_addr);

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
    const struct protosw *pr;
    int i;
    const u_int16_t defbaddynamicports_tcp[] = DEFBADDYNAMICPORTS_TCP;
    const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP;
    const u_int16_t defrootonlyports_tcp[] = DEFROOTONLYPORTS_TCP;
    const u_int16_t defrootonlyports_udp[] = DEFROOTONLYPORTS_UDP;

    ipcounters = counters_alloc(ips_ncounters);

    pool_init(&ipqent_pool, sizeof(struct ipqent), 0,
        IPL_SOFTNET, 0, "ipqe", NULL);
    pool_init(&ipq_pool, sizeof(struct ipq), 0,
        IPL_SOFTNET, 0, "ipq", NULL);

    pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
    if (pr == NULL)
        panic("ip_init");
    for (i = 0; i < IPPROTO_MAX; i++)
        ip_protox[i] = pr - inetsw;
    for (pr = inetdomain.dom_protosw;
        pr < inetdomain.dom_protoswNPROTOSW; pr++)
        if (pr->pr_domain->dom_family == PF_INET &&
            pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW &&
            pr->pr_protocol < IPPROTO_MAX)
            ip_protox[pr->pr_protocol] = pr - inetsw;
    LIST_INIT(&ipq);
    if (ip_mtudisc != 0)
        ip_mtudisc_timeout_q =
            rt_timer_queue_create(ip_mtudisc_timeout);

    /* Fill in list of ports not to allocate dynamically. */
    memset(&baddynamicports, 0, sizeof(baddynamicports));
    for (i = 0; defbaddynamicports_tcp[i] != 0; i++)
        DP_SET(baddynamicports.tcp, defbaddynamicports_tcp[i]);
    for (i = 0; defbaddynamicports_udp[i] != 0; i++)
        DP_SET(baddynamicports.udp, defbaddynamicports_udp[i]);

    /* Fill in list of ports only root can bind to. */
    memset(&rootonlyports, 0, sizeof(rootonlyports));
    for (i = 0; defrootonlyports_tcp[i] != 0; i++)
        DP_SET(rootonlyports.tcp, defrootonlyports_tcp[i]);
    for (i = 0; defrootonlyports_udp[i] != 0; i++)
        DP_SET(rootonlyports.udp, defrootonlyports_udp[i]);

    mq_init(&ipsend_mq, 64, IPL_SOFTNET);

#ifdef IPSEC
    ipsec_init();
#endif
}

/*
 * Enqueue packet for local delivery.  Queuing is used as a boundary
 * between the network layer (input/forward path) running without
 * KERNEL_LOCK() and the transport layer still needing it.
 */
int
ip_ours(struct mbuf **mp, int *offp, int nxt, int af)
{
    /* We are already in an IPv4/IPv6 local deliver loop. */
    if (af != AF_UNSPEC)
        return ip_local(mp, offp, nxt, af);

    niq_enqueue(&ipintrq, *mp);
    *mp = NULL;
    return IPPROTO_DONE;
}

/*
 * Dequeue and process locally delivered packets.
 */
void
ipintr(void)
{
    struct mbuf *m;
    int off, nxt;

    while ((m = niq_dequeue(&ipintrq)) != NULL) {
#ifdef DIAGNOSTIC
        if ((m->m_flags & M_PKTHDR) == 0)
            panic("ipintr no HDR");
#endif
        off = 0;
        nxt = ip_local(&m, &off, IPPROTO_IPV4, AF_UNSPEC);
        KASSERT(nxt == IPPROTO_DONE);
    }
}

/*
 * IPv4 input routine.
 *
 * Checksum and byte swap header.  Process options.  Forward or deliver.
 */
void
ipv4_input(struct ifnet *ifp, struct mbuf *m)
{
    int off, nxt;

    off = 0;
    nxt = ip_input_if(&m, &off, IPPROTO_IPV4, AF_UNSPEC, ifp);
    KASSERT(nxt == IPPROTO_DONE);
}

int
ip_input_if(struct mbuf **mp, int *offp, int nxt, int af, struct ifnet *ifp)
{
    struct mbuf *m = *mp;
    struct rtentry *rt = NULL;
    struct ip *ip;
    int hlen, len;
    in_addr_t pfrdr = 0;

    KASSERT(*offp == 0);

    ipstat_inc(ips_total);
    if (m->m_len < sizeof (struct ip) &&
        (m = *mp = m_pullup(m, sizeof (struct ip))) == NULL) {
        ipstat_inc(ips_toosmall);
        goto bad;
    }
    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION) {
        ipstat_inc(ips_badvers);
        goto bad;
    }
    hlen = ip->ip_hl << 2;
    if (hlen < sizeof(struct ip)) {	/* minimum header length */
        ipstat_inc(ips_badhlen);
        goto bad;
    }
    if (hlen > m->m_len) {
        if ((m = *mp = m_pullup(m, hlen)) == NULL) {
            ipstat_inc(ips_badhlen);
            goto bad;
        }
        ip = mtod(m, struct ip *);
    }

    /* 127/8 must not appear on wire - RFC1122 */
    if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
        (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
        if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
            ipstat_inc(ips_badaddr);
            goto bad;
        }
    }

    if ((m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_OK) == 0) {
        if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_BAD) {
            ipstat_inc(ips_badsum);
            goto bad;
        }

        ipstat_inc(ips_inswcsum);
        if (in_cksum(m, hlen) != 0) {
            ipstat_inc(ips_badsum);
            goto bad;
        }
    }

    /* Retrieve the packet length. */
    len = ntohs(ip->ip_len);

    /*
     * Convert fields to host representation.
     */
    if (len < hlen) {
        ipstat_inc(ips_badlen);
        goto bad;
    }

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (m->m_pkthdr.len < len) {
        ipstat_inc(ips_tooshort);
        goto bad;
    }
    if (m->m_pkthdr.len > len) {
        if (m->m_len == m->m_pkthdr.len) {
            m->m_len = len;
            m->m_pkthdr.len = len;
        } else
            m_adj(m, len - m->m_pkthdr.len);
    }

#if NCARP > 0
    if (ifp->if_type == IFT_CARP &&
        carp_lsdrop(m, AF_INET, &ip->ip_src.s_addr, &ip->ip_dst.s_addr,
        (ip->ip_p == IPPROTO_ICMP ? 0 : 1)))
        goto bad;
#endif

#if NPF > 0
    /*
     * Packet filter
     */
    pfrdr = ip->ip_dst.s_addr;
    if (pf_test(AF_INET, PF_IN, ifp, mp) != PF_PASS)
        goto bad;
    m = *mp;
    if (m == NULL)
        goto bad;

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;
    pfrdr = (pfrdr != ip->ip_dst.s_addr);
#endif

    /*
     * Process options and, if not destined for us,
     * ship it on.  ip_dooptions returns 1 when an
     * error was detected (causing an icmp message
     * to be sent and the original packet to be freed).
     */
    if (hlen > sizeof (struct ip) && ip_dooptions(m, ifp)) {
        m = *mp = NULL;
        goto bad;
    }

    if (ip->ip_dst.s_addr == INADDR_BROADCAST ||
        ip->ip_dst.s_addr == INADDR_ANY) {
        nxt = ip_ours(mp, offp, nxt, af);
        goto out;
    }

    if (in_ouraddr(m, ifp, &rt)) {
        nxt = ip_ours(mp, offp, nxt, af);
        goto out;
    }

    if (IN_MULTICAST(ip->ip_dst.s_addr)) {
        /*
         * Make sure M_MCAST is set.  It should theoretically
         * already be there, but let's play safe because upper
         * layers check for this flag.
         */
        m->m_flags |= M_MCAST;

#ifdef MROUTING
        if (ipmforwarding && ip_mrouter[ifp->if_rdomain]) {
            int error;

            if (m->m_flags & M_EXT) {
                if ((m = *mp = m_pullup(m, hlen)) == NULL) {
                    ipstat_inc(ips_toosmall);
                    goto bad;
                }
                ip = mtod(m, struct ip *);
            }
            /*
             * If we are acting as a multicast router, all
             * incoming multicast packets are passed to the
             * kernel-level multicast forwarding function.
             * The packet is returned (relatively) intact; if
             * ip_mforward() returns a non-zero value, the packet
             * must be discarded, else it may be accepted below.
             *
             * (The IP ident field is put in the same byte order
             * as expected when ip_mforward() is called from
             * ip_output().)
             */
            KERNEL_LOCK();
            error = ip_mforward(m, ifp);
            KERNEL_UNLOCK();
            if (error) {
                ipstat_inc(ips_cantforward);
                goto bad;
            }

            /*
             * The process-level routing daemon needs to receive
             * all multicast IGMP packets, whether or not this
             * host belongs to their destination groups.
             */
            if (ip->ip_p == IPPROTO_IGMP) {
                nxt = ip_ours(mp, offp, nxt, af);
                goto out;
            }
            ipstat_inc(ips_forward);
        }
#endif
        /*
         * See if we belong to the destination multicast group on the
         * arrival interface.
         */
        if (!in_hasmulti(&ip->ip_dst, ifp)) {
            ipstat_inc(ips_notmember);
            if (!IN_LOCAL_GROUP(ip->ip_dst.s_addr))
                ipstat_inc(ips_cantforward);
            goto bad;
        }
        nxt = ip_ours(mp, offp, nxt, af);
        goto out;
    }

#if NCARP > 0
    if (ifp->if_type == IFT_CARP && ip->ip_p == IPPROTO_ICMP &&
        carp_lsdrop(m, AF_INET, &ip->ip_src.s_addr, &ip->ip_dst.s_addr, 1))
        goto bad;
#endif
    /*
     * Not for us; forward if possible and desirable.
     */
    if (ipforwarding == 0) {
        ipstat_inc(ips_cantforward);
        goto bad;
    }
#ifdef IPSEC
    if (ipsec_in_use) {
        int rv;

        rv = ipsec_forward_check(m, hlen, AF_INET);
        if (rv != 0) {
            ipstat_inc(ips_cantforward);
            goto bad;
        }
        /*
         * Fall through, forward packet. Outbound IPsec policy
         * checking will occur in ip_output().
         */
    }
#endif /* IPSEC */

    ip_forward(m, ifp, rt, pfrdr);
    *mp = NULL;
    return IPPROTO_DONE;
bad:
    nxt = IPPROTO_DONE;
    m_freemp(mp);
out:
    rtfree(rt);
    return nxt;
}

/*
 * IPv4 local-delivery routine.
 *
 * If fragmented try to reassemble.  Pass to next level.
 */
int
ip_local(struct mbuf **mp, int *offp, int nxt, int af)
{
    struct mbuf *m = *mp;
    struct ip *ip = mtod(m, struct ip *);
    struct ipq *fp;
    struct ipqent *ipqe;
    int mff, hlen;

    hlen = ip->ip_hl << 2;

    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     */
    if (ip->ip_off &~ htons(IP_DF | IP_RF)) {
        if (m->m_flags & M_EXT) {		/* XXX */
            if ((m = *mp = m_pullup(m, hlen)) == NULL) {
                ipstat_inc(ips_toosmall);
                return IPPROTO_DONE;
            }
            ip = mtod(m, struct ip *);
        }

        mtx_enter(&ipq_mutex);

        /*
         * Look for queue of fragments
         * of this datagram.
         */
        LIST_FOREACH(fp, &ipq, ipq_q) {
            if (ip->ip_id == fp->ipq_id &&
                ip->ip_src.s_addr == fp->ipq_src.s_addr &&
                ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
                ip->ip_p == fp->ipq_p)
                break;
        }

        /*
         * Adjust ip_len to not reflect header,
         * set ipqe_mff if more fragments are expected,
         * convert offset of this to bytes.
         */
        ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
        mff = (ip->ip_off & htons(IP_MF)) != 0;
        if (mff) {
            /*
             * Make sure that fragments have a data length
             * that's a non-zero multiple of 8 bytes.
             */
            if (ntohs(ip->ip_len) == 0 ||
                (ntohs(ip->ip_len) & 0x7) != 0) {
                ipstat_inc(ips_badfrags);
                goto bad;
            }
        }
        ip->ip_off = htons(ntohs(ip->ip_off) << 3);

        /*
         * If datagram marked as having more fragments
         * or if this is not the first fragment,
         * attempt reassembly; if it succeeds, proceed.
         */
        if (mff || ip->ip_off) {
            ipstat_inc(ips_fragments);
            if (ip_frags + 1 > ip_maxqueue) {
                ip_flush();
                ipstat_inc(ips_rcvmemdrop);
                goto bad;
            }

            ipqe = pool_get(&ipqent_pool, PR_NOWAIT);
            if (ipqe == NULL) {
                ipstat_inc(ips_rcvmemdrop);
                goto bad;
            }
            ip_frags++;
            ipqe->ipqe_mff = mff;
            ipqe->ipqe_m = m;
            ipqe->ipqe_ip = ip;
            m = *mp = ip_reass(ipqe, fp);
            if (m == NULL)
                goto bad;
            ipstat_inc(ips_reassembled);
            ip = mtod(m, struct ip *);
            hlen = ip->ip_hl << 2;
            ip->ip_len = htons(ntohs(ip->ip_len) + hlen);
        } else
            if (fp)
                ip_freef(fp);

        mtx_leave(&ipq_mutex);
    }

    *offp = hlen;
    nxt = ip->ip_p;
    /* Check whether we are already in an IPv4/IPv6 local deliver loop. */
    if (af == AF_UNSPEC)
        nxt = ip_deliver(mp, offp, nxt, AF_INET);
    return nxt;
bad:
    mtx_leave(&ipq_mutex);
    m_freemp(mp);
    return IPPROTO_DONE;
}

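/*
 * Bump the input counter that matches the address family currently being
 * delivered: ips_<name> for IPv4, ip6s_<name> for IPv6.  The INET6 variant
 * expects a local variable `af' to be in scope where the macro is used.
 */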
#ifndef INET6
#define IPSTAT_INC(name)	ipstat_inc(ips_##name)
#else
#define IPSTAT_INC(name)	(af == AF_INET ?	\
    ipstat_inc(ips_##name) : ip6stat_inc(ip6s_##name))
#endif

int
ip_deliver(struct mbuf **mp, int *offp, int nxt, int af)
{
    const struct protosw *psw;
    int naf = af;
#ifdef INET6
    int nest = 0;
#endif /* INET6 */

    /* pf might have modified stuff, might have to chksum */
    switch (af) {
    case AF_INET:
        in_proto_cksum_out(*mp, NULL);
        break;
#ifdef INET6
    case AF_INET6:
        in6_proto_cksum_out(*mp, NULL);
        break;
#endif /* INET6 */
    }

    /*
     * Tell launch routine the next header
     */
    IPSTAT_INC(delivered);

    while (nxt != IPPROTO_DONE) {
#ifdef INET6
        if (af == AF_INET6 &&
            ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
            ip6stat_inc(ip6s_toomanyhdr);
            goto bad;
        }
#endif /* INET6 */

        /*
         * protection against faulty packets - there should be
         * more sanity checks in header chain processing.
         */
        if ((*mp)->m_pkthdr.len < *offp) {
            IPSTAT_INC(tooshort);
            goto bad;
        }

#ifdef INET6
        /* draft-itojun-ipv6-tcp-to-anycast */
        if (af == AF_INET6 &&
            ISSET((*mp)->m_flags, M_ACAST) && (nxt == IPPROTO_TCP)) {
            if ((*mp)->m_len >= sizeof(struct ip6_hdr)) {
                icmp6_error(*mp, ICMP6_DST_UNREACH,
                    ICMP6_DST_UNREACH_ADDR,
                    offsetof(struct ip6_hdr, ip6_dst));
                *mp = NULL;
            }
            goto bad;
        }
#endif /* INET6 */

#ifdef IPSEC
        if (ipsec_in_use) {
            if (ipsec_local_check(*mp, *offp, nxt, af) != 0) {
                IPSTAT_INC(cantforward);
                goto bad;
            }
        }
        /* Otherwise, just fall through and deliver the packet */
#endif /* IPSEC */

        switch (nxt) {
        case IPPROTO_IPV4:
            naf = AF_INET;
            ipstat_inc(ips_delivered);
            break;
#ifdef INET6
        case IPPROTO_IPV6:
            naf = AF_INET6;
            ip6stat_inc(ip6s_delivered);
            break;
#endif /* INET6 */
        }
        switch (af) {
        case AF_INET:
            psw = &inetsw[ip_protox[nxt]];
            break;
#ifdef INET6
        case AF_INET6:
            psw = &inet6sw[ip6_protox[nxt]];
            break;
#endif /* INET6 */
        }
        nxt = (*psw->pr_input)(mp, offp, nxt, af);
        af = naf;
    }
    return nxt;
bad:
    m_freemp(mp);
    return IPPROTO_DONE;
}
#undef IPSTAT_INC

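/*
 * Decide whether the packet's destination address belongs to this host:
 * ask pf(4) first, then look for a local or broadcast route, and finally
 * fall back to the old classful broadcast forms.
 */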
int
in_ouraddr(struct mbuf *m, struct ifnet *ifp, struct rtentry **prt)
{
    struct rtentry *rt;
    struct ip *ip;
    struct sockaddr_in sin;
    int match = 0;

#if NPF > 0
    switch (pf_ouraddr(m)) {
    case 0:
        return (0);
    case 1:
        return (1);
    default:
        /* pf does not know it */
        break;
    }
#endif

    ip = mtod(m, struct ip *);

    memset(&sin, 0, sizeof(sin));
    sin.sin_len = sizeof(sin);
    sin.sin_family = AF_INET;
    sin.sin_addr = ip->ip_dst;
    rt = rtalloc_mpath(sintosa(&sin), &ip->ip_src.s_addr,
        m->m_pkthdr.ph_rtableid);
    if (rtisvalid(rt)) {
        if (ISSET(rt->rt_flags, RTF_LOCAL))
            match = 1;

        /*
         * If directedbcast is enabled we only consider it local
         * if it is received on the interface with that address.
         */
        if (ISSET(rt->rt_flags, RTF_BROADCAST) &&
            (!ip_directedbcast || rt->rt_ifidx == ifp->if_index)) {
            match = 1;

            /* Make sure M_BCAST is set */
            m->m_flags |= M_BCAST;
        }
    }
    *prt = rt;

    if (!match) {
        struct ifaddr *ifa;

        /*
         * No local address or broadcast address found, so check for
         * ancient classful broadcast addresses.
         * It must have been broadcast on the link layer, and for an
         * address on the interface it was received on.
         */
        if (!ISSET(m->m_flags, M_BCAST) ||
            !IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, ip->ip_dst.s_addr))
            return (0);

        if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid))
            return (0);
        /*
         * The check in the loop assumes you only rx a packet on an UP
         * interface, and that M_BCAST will only be set on a BROADCAST
         * interface.
         */
        NET_ASSERT_LOCKED();
        TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
            if (ifa->ifa_addr->sa_family != AF_INET)
                continue;

            if (IN_CLASSFULBROADCAST(ip->ip_dst.s_addr,
                ifatoia(ifa)->ia_addr.sin_addr.s_addr)) {
                match = 1;
                break;
            }
        }
    }

    return (match);
}

/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram.  If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 */
struct mbuf *
ip_reass(struct ipqent *ipqe, struct ipq *fp)
{
    struct mbuf *m = ipqe->ipqe_m;
    struct ipqent *nq, *p, *q;
    struct ip *ip;
    struct mbuf *t;
    int hlen = ipqe->ipqe_ip->ip_hl << 2;
    int i, next;
    u_int8_t ecn, ecn0;

    MUTEX_ASSERT_LOCKED(&ipq_mutex);

    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL) {
        fp = pool_get(&ipq_pool, PR_NOWAIT);
        if (fp == NULL)
            goto dropfrag;
        LIST_INSERT_HEAD(&ipq, fp, ipq_q);
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ipqe->ipqe_ip->ip_p;
        fp->ipq_id = ipqe->ipqe_ip->ip_id;
        LIST_INIT(&fp->ipq_fragq);
        fp->ipq_src = ipqe->ipqe_ip->ip_src;
        fp->ipq_dst = ipqe->ipqe_ip->ip_dst;
        p = NULL;
        goto insert;
    }

    /*
     * Handle ECN by comparing this segment with the first one;
     * if CE is set, do not lose CE.
     * drop if CE and not-ECT are mixed for the same packet.
     */
    ecn = ipqe->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
    ecn0 = LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
    if (ecn == IPTOS_ECN_CE) {
        if (ecn0 == IPTOS_ECN_NOTECT)
            goto dropfrag;
        if (ecn0 != IPTOS_ECN_CE)
            LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos |=
                IPTOS_ECN_CE;
    }
    if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
        goto dropfrag;

    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
        p = q, q = LIST_NEXT(q, ipqe_q))
        if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us.
     */
    if (p != NULL) {
        i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
            ntohs(ipqe->ipqe_ip->ip_off);
        if (i > 0) {
            if (i >= ntohs(ipqe->ipqe_ip->ip_len))
                goto dropfrag;
            m_adj(ipqe->ipqe_m, i);
            ipqe->ipqe_ip->ip_off =
                htons(ntohs(ipqe->ipqe_ip->ip_off) + i);
            ipqe->ipqe_ip->ip_len =
                htons(ntohs(ipqe->ipqe_ip->ip_len) - i);
        }
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL &&
        ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) >
        ntohs(q->ipqe_ip->ip_off); q = nq) {
        i = (ntohs(ipqe->ipqe_ip->ip_off) +
            ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
        if (i < ntohs(q->ipqe_ip->ip_len)) {
            q->ipqe_ip->ip_len =
                htons(ntohs(q->ipqe_ip->ip_len) - i);
            q->ipqe_ip->ip_off =
                htons(ntohs(q->ipqe_ip->ip_off) + i);
            m_adj(q->ipqe_m, i);
            break;
        }
        nq = LIST_NEXT(q, ipqe_q);
        m_freem(q->ipqe_m);
        LIST_REMOVE(q, ipqe_q);
        pool_put(&ipqent_pool, q);
        ip_frags--;
    }

insert:
    /*
     * Stick new segment in its place;
     * check for complete reassembly.
     */
    if (p == NULL) {
        LIST_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
    } else {
        LIST_INSERT_AFTER(p, ipqe, ipqe_q);
    }
    next = 0;
    for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
        p = q, q = LIST_NEXT(q, ipqe_q)) {
        if (ntohs(q->ipqe_ip->ip_off) != next)
            return (0);
        next += ntohs(q->ipqe_ip->ip_len);
    }
    if (p->ipqe_mff)
        return (0);

    /*
     * Reassembly is complete.  Check for a bogus message size and
     * concatenate fragments.
     */
    q = LIST_FIRST(&fp->ipq_fragq);
    ip = q->ipqe_ip;
    if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
        ipstat_inc(ips_toolong);
        ip_freef(fp);
        return (0);
    }
    m = q->ipqe_m;
    t = m->m_next;
    m->m_next = 0;
    m_cat(m, t);
    nq = LIST_NEXT(q, ipqe_q);
    pool_put(&ipqent_pool, q);
    ip_frags--;
    for (q = nq; q != NULL; q = nq) {
        t = q->ipqe_m;
        nq = LIST_NEXT(q, ipqe_q);
        pool_put(&ipqent_pool, q);
        ip_frags--;
        m_cat(m, t);
    }

    /*
     * Create header for new ip packet by
     * modifying header of first packet;
     * dequeue and discard fragment reassembly header.
     * Make header visible.
     */
    ip->ip_len = htons(next);
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    LIST_REMOVE(fp, ipq_q);
    pool_put(&ipq_pool, fp);
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);
    /* some debugging cruft by sklower, below, will go away soon */
    if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
        int plen = 0;
        for (t = m; t; t = t->m_next)
            plen += t->m_len;
        m->m_pkthdr.len = plen;
    }
    return (m);

dropfrag:
    ipstat_inc(ips_fragdropped);
    m_freem(m);
    pool_put(&ipqent_pool, ipqe);
    ip_frags--;
    return (NULL);
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
ip_freef(struct ipq *fp)
{
    struct ipqent *q;

    MUTEX_ASSERT_LOCKED(&ipq_mutex);

    while ((q = LIST_FIRST(&fp->ipq_fragq)) != NULL) {
        LIST_REMOVE(q, ipqe_q);
        m_freem(q->ipqe_m);
        pool_put(&ipqent_pool, q);
        ip_frags--;
    }
    LIST_REMOVE(fp, ipq_q);
    pool_put(&ipq_pool, fp);
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
void
ip_slowtimo(void)
{
    struct ipq *fp, *nfp;

    mtx_enter(&ipq_mutex);
    LIST_FOREACH_SAFE(fp, &ipq, ipq_q, nfp) {
        if (--fp->ipq_ttl == 0) {
            ipstat_inc(ips_fragtimeout);
            ip_freef(fp);
        }
    }
    mtx_leave(&ipq_mutex);
}

/*
 * Flush a bunch of datagram fragments, till we are down to 75%.
 */
void
ip_flush(void)
{
    int max = 50;

    MUTEX_ASSERT_LOCKED(&ipq_mutex);

    while (!LIST_EMPTY(&ipq) && ip_frags > ip_maxqueue * 3 / 4 && --max) {
        ipstat_inc(ips_fragdropped);
        ip_freef(LIST_FIRST(&ipq));
    }
}

/*
 * Do option processing on a datagram,
 * possibly discarding it if bad options are encountered,
 * or forwarding it if source-routed.
 * Returns 1 if packet has been forwarded/freed,
 * 0 if the packet should be processed further.
 */
int
ip_dooptions(struct mbuf *m, struct ifnet *ifp)
{
    struct ip *ip = mtod(m, struct ip *);
    unsigned int rtableid = m->m_pkthdr.ph_rtableid;
    struct rtentry *rt;
    struct sockaddr_in ipaddr;
    u_char *cp;
    struct ip_timestamp ipt;
    struct in_ifaddr *ia;
    int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
    struct in_addr sin, dst;
    u_int32_t ntime;

    dst = ip->ip_dst;
    cp = (u_char *)(ip + 1);
    cnt = (ip->ip_hl << 2) - sizeof (struct ip);

    KERNEL_LOCK();
    for (; cnt > 0; cnt -= optlen, cp += optlen) {
        opt = cp[IPOPT_OPTVAL];
        if (opt == IPOPT_EOL)
            break;
        if (opt == IPOPT_NOP)
            optlen = 1;
        else {
            if (cnt < IPOPT_OLEN + sizeof(*cp)) {
                code = &cp[IPOPT_OLEN] - (u_char *)ip;
                goto bad;
            }
            optlen = cp[IPOPT_OLEN];
            if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
                code = &cp[IPOPT_OLEN] - (u_char *)ip;
                goto bad;
            }
        }

        switch (opt) {

        default:
            break;

        /*
         * Source routing with record.
         * Find interface with current destination address.
         * If none on this machine then drop if strictly routed,
         * or do nothing if loosely routed.
         * Record interface address and bring up next address
         * component.  If strictly routed make sure next
         * address is on directly accessible net.
         */
        case IPOPT_LSRR:
        case IPOPT_SSRR:
            if (!ip_dosourceroute) {
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_SRCFAIL;
                goto bad;
            }
            if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
                code = &cp[IPOPT_OFFSET] - (u_char *)ip;
                goto bad;
            }
            memset(&ipaddr, 0, sizeof(ipaddr));
            ipaddr.sin_family = AF_INET;
            ipaddr.sin_len = sizeof(ipaddr);
            ipaddr.sin_addr = ip->ip_dst;
            ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr),
                m->m_pkthdr.ph_rtableid));
            if (ia == NULL) {
                if (opt == IPOPT_SSRR) {
                    type = ICMP_UNREACH;
                    code = ICMP_UNREACH_SRCFAIL;
                    goto bad;
                }
                /*
                 * Loose routing, and not at next destination
                 * yet; nothing to do except forward.
                 */
                break;
            }
            off--;			/* 0 origin */
            if ((off + sizeof(struct in_addr)) > optlen) {
                /*
                 * End of source route.  Should be for us.
                 */
                save_rte(m, cp, ip->ip_src);
                break;
            }

            /*
             * locate outgoing interface
             */
            memset(&ipaddr, 0, sizeof(ipaddr));
            ipaddr.sin_family = AF_INET;
            ipaddr.sin_len = sizeof(ipaddr);
            memcpy(&ipaddr.sin_addr, cp + off,
                sizeof(ipaddr.sin_addr));
            /* keep packet in the virtual instance */
            rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
            if (!rtisvalid(rt) || ((opt == IPOPT_SSRR) &&
                ISSET(rt->rt_flags, RTF_GATEWAY))) {
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_SRCFAIL;
                rtfree(rt);
                goto bad;
            }
            ia = ifatoia(rt->rt_ifa);
            memcpy(cp + off, &ia->ia_addr.sin_addr,
                sizeof(struct in_addr));
            rtfree(rt);
            cp[IPOPT_OFFSET] += sizeof(struct in_addr);
            ip->ip_dst = ipaddr.sin_addr;
            /*
             * Let ip_intr's mcast routing check handle mcast pkts
             */
            forward = !IN_MULTICAST(ip->ip_dst.s_addr);
            break;

        case IPOPT_RR:
            if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
                code = &cp[IPOPT_OLEN] - (u_char *)ip;
                goto bad;
            }
            if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
                code = &cp[IPOPT_OFFSET] - (u_char *)ip;
                goto bad;
            }

            /*
             * If no space remains, ignore.
             */
            off--;			/* 0 origin */
            if ((off + sizeof(struct in_addr)) > optlen)
                break;
            memset(&ipaddr, 0, sizeof(ipaddr));
            ipaddr.sin_family = AF_INET;
            ipaddr.sin_len = sizeof(ipaddr);
            ipaddr.sin_addr = ip->ip_dst;
            /*
             * locate outgoing interface; if we're the destination,
             * use the incoming interface (should be same).
             * Again keep the packet inside the virtual instance.
             */
            rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
            if (!rtisvalid(rt)) {
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_HOST;
                rtfree(rt);
                goto bad;
            }
            ia = ifatoia(rt->rt_ifa);
            memcpy(cp + off, &ia->ia_addr.sin_addr,
                sizeof(struct in_addr));
            rtfree(rt);
            cp[IPOPT_OFFSET] += sizeof(struct in_addr);
            break;

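        /*
         * Internet Timestamp option (RFC 791): record a timestamp and,
         * depending on ipt_flg, also the address it was taken at.
         */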
        case IPOPT_TS:
            code = cp - (u_char *)ip;
            if (optlen < sizeof(struct ip_timestamp))
                goto bad;
            memcpy(&ipt, cp, sizeof(struct ip_timestamp));
            if (ipt.ipt_ptr < 5 || ipt.ipt_len < 5)
                goto bad;
            if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) > ipt.ipt_len) {
                if (++ipt.ipt_oflw == 0)
                    goto bad;
                break;
            }
            memcpy(&sin, cp + ipt.ipt_ptr - 1, sizeof sin);
            switch (ipt.ipt_flg) {

            case IPOPT_TS_TSONLY:
                break;

            case IPOPT_TS_TSANDADDR:
                if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
                    sizeof(struct in_addr) > ipt.ipt_len)
                    goto bad;
                memset(&ipaddr, 0, sizeof(ipaddr));
                ipaddr.sin_family = AF_INET;
                ipaddr.sin_len = sizeof(ipaddr);
                ipaddr.sin_addr = dst;
                ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr),
                    ifp));
                if (ia == NULL)
                    continue;
                memcpy(&sin, &ia->ia_addr.sin_addr,
                    sizeof(struct in_addr));
                ipt.ipt_ptr += sizeof(struct in_addr);
                break;

            case IPOPT_TS_PRESPEC:
                if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
                    sizeof(struct in_addr) > ipt.ipt_len)
                    goto bad;
                memset(&ipaddr, 0, sizeof(ipaddr));
                ipaddr.sin_family = AF_INET;
                ipaddr.sin_len = sizeof(ipaddr);
                ipaddr.sin_addr = sin;
                if (ifa_ifwithaddr(sintosa(&ipaddr),
                    m->m_pkthdr.ph_rtableid) == NULL)
                    continue;
                ipt.ipt_ptr += sizeof(struct in_addr);
                break;

            default:
                /* XXX can't take &ipt->ipt_flg */
                code = (u_char *)&ipt.ipt_ptr -
                    (u_char *)ip + 1;
                goto bad;
            }
            ntime = iptime();
            memcpy(cp + ipt.ipt_ptr - 1, &ntime, sizeof(u_int32_t));
            ipt.ipt_ptr += sizeof(u_int32_t);
        }
    }
    KERNEL_UNLOCK();
    if (forward && ipforwarding) {
        ip_forward(m, ifp, NULL, 1);
        return (1);
    }
    return (0);
bad:
    KERNEL_UNLOCK();
    icmp_error(m, type, code, 0, 0);
    ipstat_inc(ips_badoptions);
    return (1);
}

/*
 * Save incoming source route for use in replies,
 * to be picked up later by ip_srcroute if the receiver is interested.
 */
void
save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
{
    struct ip_srcrt *isr;
    struct m_tag *mtag;
    unsigned olen;

    olen = option[IPOPT_OLEN];
    if (olen > sizeof(isr->isr_hdr) + sizeof(isr->isr_routes))
        return;

    mtag = m_tag_get(PACKET_TAG_SRCROUTE, sizeof(*isr), M_NOWAIT);
    if (mtag == NULL)
        return;
    isr = (struct ip_srcrt *)(mtag + 1);

    memcpy(isr->isr_hdr, option, olen);
    isr->isr_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
    isr->isr_dst = dst;
    m_tag_prepend(m, mtag);
}

/*
 * Retrieve incoming source route for use in replies,
 * in the same form used by setsockopt.
 * The first hop is placed before the options; it will be removed later.
 */
struct mbuf *
ip_srcroute(struct mbuf *m0)
{
    struct in_addr *p, *q;
    struct mbuf *m;
    struct ip_srcrt *isr;
    struct m_tag *mtag;

    if (!ip_dosourceroute)
        return (NULL);

    mtag = m_tag_find(m0, PACKET_TAG_SRCROUTE, NULL);
    if (mtag == NULL)
        return (NULL);
    isr = (struct ip_srcrt *)(mtag + 1);

    if (isr->isr_nhops == 0)
        return (NULL);
    m = m_get(M_DONTWAIT, MT_SOOPTS);
    if (m == NULL)
        return (NULL);

#define OPTSIZ	(sizeof(isr->isr_nop) + sizeof(isr->isr_hdr))

    /* length is (nhops+1)*sizeof(addr) + sizeof(nop + header) */
    m->m_len = (isr->isr_nhops + 1) * sizeof(struct in_addr) + OPTSIZ;

    /*
     * First save first hop for return route
     */
    p = &(isr->isr_routes[isr->isr_nhops - 1]);
    *(mtod(m, struct in_addr *)) = *p--;

    /*
     * Copy option fields and padding (nop) to mbuf.
     */
    isr->isr_nop = IPOPT_NOP;
    isr->isr_hdr[IPOPT_OFFSET] = IPOPT_MINOFF;
    memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &isr->isr_nop,
        OPTSIZ);
    q = (struct in_addr *)(mtod(m, caddr_t) +
        sizeof(struct in_addr) + OPTSIZ);
#undef OPTSIZ
    /*
     * Record return path as an IP source route,
     * reversing the path (pointers are now aligned).
     */
    while (p >= isr->isr_routes) {
        *q++ = *p--;
    }
    /*
     * Last hop goes to final destination.
     */
    *q = isr->isr_dst;
    m_tag_delete(m0, (struct m_tag *)isr);
    return (m);
}

/*
 * Strip out IP options, at higher level protocol in the kernel.
 */
void
ip_stripoptions(struct mbuf *m)
{
    int i;
    struct ip *ip = mtod(m, struct ip *);
    caddr_t opts;
    int olen;

    olen = (ip->ip_hl << 2) - sizeof (struct ip);
    opts = (caddr_t)(ip + 1);
    i = m->m_len - (sizeof (struct ip) + olen);
    memmove(opts, opts + olen, i);
    m->m_len -= olen;
    if (m->m_flags & M_PKTHDR)
        m->m_pkthdr.len -= olen;
    ip->ip_hl = sizeof(struct ip) >> 2;
    ip->ip_len = htons(ntohs(ip->ip_len) - olen);
}

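/*
 * Map PRC_* control input codes to the errno values reported to sockets.
 */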
const int inetctlerrmap[PRC_NCMDS] = {
    0,              0,              0,              0,
    0,              EMSGSIZE,       EHOSTDOWN,      EHOSTUNREACH,
    EHOSTUNREACH,   EHOSTUNREACH,   ECONNREFUSED,   ECONNREFUSED,
    EMSGSIZE,       EHOSTUNREACH,   0,              0,
    0,              0,              0,              0,
    ENOPROTOOPT
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, struct ifnet *ifp, struct rtentry *rt, int srcrt)
{
    struct mbuf mfake, *mcopy = NULL;
    struct ip *ip = mtod(m, struct ip *);
    struct sockaddr_in *sin;
    struct route ro;
    int error, type = 0, code = 0, destmtu = 0, fake = 0, len;
    u_int32_t dest;

    dest = 0;
    if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
        ipstat_inc(ips_cantforward);
        m_freem(m);
        goto freecopy;
    }
    if (ip->ip_ttl <= IPTTLDEC) {
        icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
        goto freecopy;
    }

    sin = satosin(&ro.ro_dst);
    memset(sin, 0, sizeof(*sin));
    sin->sin_family = AF_INET;
    sin->sin_len = sizeof(*sin);
    sin->sin_addr = ip->ip_dst;

    if (!rtisvalid(rt)) {
        rtfree(rt);
        rt = rtalloc_mpath(sintosa(sin), &ip->ip_src.s_addr,
            m->m_pkthdr.ph_rtableid);
        if (rt == NULL) {
            icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
            return;
        }
    }

    /*
     * Save at most 68 bytes of the packet in case
     * we need to generate an ICMP message to the src.
     * The data is saved in the mbuf on the stack that
     * acts as a temporary storage not intended to be
     * passed down the IP stack or to the mfree.
     */
    memset(&mfake.m_hdr, 0, sizeof(mfake.m_hdr));
    mfake.m_type = m->m_type;
    if (m_dup_pkthdr(&mfake, m, M_DONTWAIT) == 0) {
        mfake.m_data = mfake.m_pktdat;
        len = min(ntohs(ip->ip_len), 68);
        m_copydata(m, 0, len, mfake.m_pktdat);
        mfake.m_pkthdr.len = mfake.m_len = len;
#if NPF > 0
        pf_pkt_addr_changed(&mfake);
#endif	/* NPF > 0 */
        fake = 1;
    }

    ip->ip_ttl -= IPTTLDEC;

    /*
     * If forwarding packet using same interface that it came in on,
     * perhaps should send a redirect to sender to shortcut a hop.
     * Only send redirect if source is sending directly to us,
     * and if packet was not source routed (or has any options).
     * Also, don't send redirect if forwarding using a default route
     * or a route modified by a redirect.
     * Don't send redirect if we advertise destination's arp address
     * as ours (proxy arp).
     */
    if ((rt->rt_ifidx == ifp->if_index) &&
        (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
        satosin(rt_key(rt))->sin_addr.s_addr != 0 &&
        ipsendredirects && !srcrt &&
        !arpproxy(satosin(rt_key(rt))->sin_addr, m->m_pkthdr.ph_rtableid)) {
        if ((ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_netmask) ==
            ifatoia(rt->rt_ifa)->ia_net) {
            if (rt->rt_flags & RTF_GATEWAY)
                dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
            else
                dest = ip->ip_dst.s_addr;
            /* Router requirements say to only send host redirects */
            type = ICMP_REDIRECT;
            code = ICMP_REDIRECT_HOST;
        }
    }

    ro.ro_rt = rt;
    ro.ro_tableid = m->m_pkthdr.ph_rtableid;
    error = ip_output(m, NULL, &ro,
        (IP_FORWARDING | (ip_directedbcast ? IP_ALLOWBROADCAST : 0)),
        NULL, NULL, 0);
    rt = ro.ro_rt;
    if (error)
        ipstat_inc(ips_cantforward);
    else {
        ipstat_inc(ips_forward);
        if (type)
            ipstat_inc(ips_redirectsent);
        else
            goto freecopy;
    }
    if (!fake)
        goto freecopy;

    switch (error) {

    case 0:			/* forwarded, but need redirect */
        /* type, code set above */
        break;

    case ENETUNREACH:		/* shouldn't happen, checked above */
    case EHOSTUNREACH:
    case ENETDOWN:
    case EHOSTDOWN:
    default:
        type = ICMP_UNREACH;
        code = ICMP_UNREACH_HOST;
        break;

    case EMSGSIZE:
        type = ICMP_UNREACH;
        code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
        if (rt != NULL) {
            if (rt->rt_mtu)
                destmtu = rt->rt_mtu;
            else {
                struct ifnet *destifp;

                destifp = if_get(rt->rt_ifidx);
                if (destifp != NULL)
                    destmtu = destifp->if_mtu;
                if_put(destifp);
            }
        }
#endif /*IPSEC*/
        ipstat_inc(ips_cantfrag);
        break;

    case EACCES:
        /*
         * pf(4) blocked the packet.  There is no need to send an ICMP
         * packet back since pf(4) takes care of it.
         */
        goto freecopy;
    case ENOBUFS:
        /*
         * a router should not generate ICMP_SOURCEQUENCH as
         * required in RFC1812 Requirements for IP Version 4 Routers.
         * source quench could be a big problem under DoS attacks,
         * or the underlying interface is rate-limited.
         */
        goto freecopy;
    }

    mcopy = m_copym(&mfake, 0, len, M_DONTWAIT);
    if (mcopy)
        icmp_error(mcopy, type, code, dest, destmtu);

freecopy:
    if (fake)
        m_tag_delete_chain(&mfake);
    rtfree(rt);
}

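/*
 * Handle the IPCTL_* sysctl variables under net.inet.ip.
 */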
int
ip_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
    int error;
#ifdef MROUTING
    extern int ip_mrtproto;
    extern struct mrtstat mrtstat;
#endif

    /* Almost all sysctl names at this level are terminal. */
    if (namelen != 1 && name[0] != IPCTL_IFQUEUE)
        return (ENOTDIR);

    switch (name[0]) {
    case IPCTL_SOURCEROUTE:
        /*
         * Don't allow this to change in a secure environment.
         */
        if (newp && securelevel > 0)
            return (EPERM);
        NET_LOCK();
        error = sysctl_int(oldp, oldlenp, newp, newlen,
            &ip_dosourceroute);
        NET_UNLOCK();
        return (error);
    case IPCTL_MTUDISC:
        NET_LOCK();
        error = sysctl_int(oldp, oldlenp, newp, newlen,
            &ip_mtudisc);
        if (ip_mtudisc != 0 && ip_mtudisc_timeout_q == NULL) {
            ip_mtudisc_timeout_q =
                rt_timer_queue_create(ip_mtudisc_timeout);
        } else if (ip_mtudisc == 0 && ip_mtudisc_timeout_q != NULL) {
            rt_timer_queue_destroy(ip_mtudisc_timeout_q);
            ip_mtudisc_timeout_q = NULL;
        }
        NET_UNLOCK();
        return error;
    case IPCTL_MTUDISCTIMEOUT:
        NET_LOCK();
        error = sysctl_int(oldp, oldlenp, newp, newlen,
            &ip_mtudisc_timeout);
        if (ip_mtudisc_timeout_q != NULL)
            rt_timer_queue_change(ip_mtudisc_timeout_q,
                ip_mtudisc_timeout);
        NET_UNLOCK();
        return (error);
#ifdef IPSEC
    case IPCTL_ENCDEBUG:
    case IPCTL_IPSEC_EXPIRE_ACQUIRE:
    case IPCTL_IPSEC_EMBRYONIC_SA_TIMEOUT:
    case IPCTL_IPSEC_REQUIRE_PFS:
    case IPCTL_IPSEC_SOFT_ALLOCATIONS:
    case IPCTL_IPSEC_ALLOCATIONS:
    case IPCTL_IPSEC_SOFT_BYTES:
    case IPCTL_IPSEC_BYTES:
    case IPCTL_IPSEC_TIMEOUT:
    case IPCTL_IPSEC_SOFT_TIMEOUT:
    case IPCTL_IPSEC_SOFT_FIRSTUSE:
    case IPCTL_IPSEC_FIRSTUSE:
    case IPCTL_IPSEC_ENC_ALGORITHM:
    case IPCTL_IPSEC_AUTH_ALGORITHM:
    case IPCTL_IPSEC_IPCOMP_ALGORITHM:
        return (ipsec_sysctl(name, namelen, oldp, oldlenp, newp,
            newlen));
#endif
    case IPCTL_IFQUEUE:
        return (sysctl_niq(name + 1, namelen - 1,
            oldp, oldlenp, newp, newlen, &ipintrq));
    case IPCTL_STATS:
        return (ip_sysctl_ipstat(oldp, oldlenp, newp));
#ifdef MROUTING
    case IPCTL_MRTSTATS:
        return (sysctl_rdstruct(oldp, oldlenp, newp,
            &mrtstat, sizeof(mrtstat)));
    case IPCTL_MRTPROTO:
        return (sysctl_rdint(oldp, oldlenp, newp, ip_mrtproto));
    case IPCTL_MRTMFC:
        if (newp)
            return (EPERM);
        NET_LOCK();
        error = mrt_sysctl_mfc(oldp, oldlenp);
        NET_UNLOCK();
        return (error);
    case IPCTL_MRTVIF:
        if (newp)
            return (EPERM);
        NET_LOCK();
        error = mrt_sysctl_vif(oldp, oldlenp);
        NET_UNLOCK();
        return (error);
#else
    case IPCTL_MRTPROTO:
    case IPCTL_MRTSTATS:
    case IPCTL_MRTMFC:
    case IPCTL_MRTVIF:
        return (EOPNOTSUPP);
#endif
    default:
        if (name[0] < IPCTL_MAXID) {
            NET_LOCK();
            error = sysctl_int_arr(ipctl_vars, name, namelen,
                oldp, oldlenp, newp, newlen);
            NET_UNLOCK();
            return (error);
        }
        return (EOPNOTSUPP);
    }
    /* NOTREACHED */
}

int
ip_sysctl_ipstat(void *oldp, size_t *oldlenp, void *newp)
{
    uint64_t counters[ips_ncounters];
    struct ipstat ipstat;
    u_long *words = (u_long *)&ipstat;
    int i;

    CTASSERT(sizeof(ipstat) == (nitems(counters) * sizeof(u_long)));
    memset(&ipstat, 0, sizeof ipstat);
    counters_read(ipcounters, counters, nitems(counters));

    for (i = 0; i < nitems(counters); i++)
        words[i] = (u_long)counters[i];

    return (sysctl_rdstruct(oldp, oldlenp, newp, &ipstat, sizeof(ipstat)));
}

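/*
 * Build the control messages (timestamp, destination address, receiving
 * interface, TTL, routing table) requested via socket options for a
 * received datagram and append them to *mp.
 */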
void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
    if (inp->inp_socket->so_options & SO_TIMESTAMP) {
        struct timeval tv;

        microtime(&tv);
        *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
            SCM_TIMESTAMP, SOL_SOCKET);
        if (*mp)
            mp = &(*mp)->m_next;
    }

    if (inp->inp_flags & INP_RECVDSTADDR) {
        *mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
            sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
        if (*mp)
            mp = &(*mp)->m_next;
    }
#ifdef notyet
    /* this code is broken and will probably never be fixed. */
    /* options were tossed already */
    if (inp->inp_flags & INP_RECVOPTS) {
        *mp = sbcreatecontrol((caddr_t) opts_deleted_above,
            sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
        if (*mp)
            mp = &(*mp)->m_next;
    }
    /* ip_srcroute doesn't do what we want here, need to fix */
    if (inp->inp_flags & INP_RECVRETOPTS) {
        *mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
            sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
        if (*mp)
            mp = &(*mp)->m_next;
    }
#endif
    if (inp->inp_flags & INP_RECVIF) {
        struct sockaddr_dl sdl;
        struct ifnet *ifp;

        ifp = if_get(m->m_pkthdr.ph_ifidx);
        if (ifp == NULL || ifp->if_sadl == NULL) {
            memset(&sdl, 0, sizeof(sdl));
            sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]);
            sdl.sdl_family = AF_LINK;
            sdl.sdl_index = ifp != NULL ? ifp->if_index : 0;
            sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0;
            *mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len,
                IP_RECVIF, IPPROTO_IP);
        } else {
            *mp = sbcreatecontrol((caddr_t) ifp->if_sadl,
                ifp->if_sadl->sdl_len, IP_RECVIF, IPPROTO_IP);
        }
        if (*mp)
            mp = &(*mp)->m_next;
        if_put(ifp);
    }
    if (inp->inp_flags & INP_RECVTTL) {
        *mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
            sizeof(u_int8_t), IP_RECVTTL, IPPROTO_IP);
        if (*mp)
            mp = &(*mp)->m_next;
    }
    if (inp->inp_flags & INP_RECVRTABLE) {
        u_int rtableid = inp->inp_rtableid;

#if NPF > 0
        if (m && m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
            struct pf_divert *divert;

            divert = pf_find_divert(m);
            KASSERT(divert != NULL);
            rtableid = divert->rdomain;
        }
#endif

        *mp = sbcreatecontrol((caddr_t) &rtableid,
            sizeof(u_int), IP_RECVRTABLE, IPPROTO_IP);
        if (*mp)
            mp = &(*mp)->m_next;
    }
}

void
ip_send_dispatch(void *xmq)
{
    struct mbuf_queue *mq = xmq;
    struct mbuf *m;
    struct mbuf_list ml;

    mq_delist(mq, &ml);
    if (ml_empty(&ml))
        return;

    NET_RLOCK();
    while ((m = ml_dequeue(&ml)) != NULL) {
        ip_output(m, NULL, NULL, 0, NULL, NULL, 0);
    }
    NET_RUNLOCK();
}

void
ip_send(struct mbuf *m)
{
    mq_enqueue(&ipsend_mq, m);
    task_add(net_tq(0), &ipsend_task);
}