1 /* $OpenBSD: ip_input.c,v 1.403 2025/01/03 21:27:40 bluhm Exp $ */ 2 /* $NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1988, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 * 32 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 33 */ 34 35 #include "pf.h" 36 #include "carp.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/mbuf.h> 41 #include <sys/domain.h> 42 #include <sys/mutex.h> 43 #include <sys/protosw.h> 44 #include <sys/socket.h> 45 #include <sys/socketvar.h> 46 #include <sys/sysctl.h> 47 #include <sys/pool.h> 48 #include <sys/task.h> 49 50 #include <net/if.h> 51 #include <net/if_var.h> 52 #include <net/if_dl.h> 53 #include <net/route.h> 54 #include <net/netisr.h> 55 56 #include <netinet/in.h> 57 #include <netinet/in_systm.h> 58 #include <netinet/if_ether.h> 59 #include <netinet/ip.h> 60 #include <netinet/in_pcb.h> 61 #include <netinet/in_var.h> 62 #include <netinet/ip_var.h> 63 #include <netinet/ip_icmp.h> 64 #include <net/if_types.h> 65 66 #ifdef INET6 67 #include <netinet6/ip6_var.h> 68 #endif 69 70 #if NPF > 0 71 #include <net/pfvar.h> 72 #endif 73 74 #ifdef MROUTING 75 #include <netinet/ip_mroute.h> 76 #endif 77 78 #ifdef IPSEC 79 #include <netinet/ip_ipsp.h> 80 #endif /* IPSEC */ 81 82 #if NCARP > 0 83 #include <netinet/ip_carp.h> 84 #endif 85 86 /* 87 * Locks used to protect global variables in this file: 88 * I immutable after creation 89 * a atomic operations 90 * N net lock 91 */ 92 93 /* values controllable via sysctl */ 94 int ip_forwarding = 0; /* [a] */ 95 int ipmforwarding = 0; 96 int ipmultipath = 0; 97 int ip_sendredirects = 1; /* [a] */ 98 int ip_dosourceroute = 0; 99 int ip_defttl = IPDEFTTL; 100 int ip_mtudisc = 1; 101 int ip_mtudisc_timeout = IPMTUDISCTIMEOUT; 102 int ip_directedbcast = 0; /* [a] */ 103 104 /* Protects `ipq' and `ip_frags'. 
 */
struct mutex ipq_mutex = MUTEX_INITIALIZER(IPL_SOFTNET);

/* IP reassembly queue; every entry is protected by ipq_mutex */
LIST_HEAD(, ipq) ipq;

/* Keep track of memory used for reassembly */
int	ip_maxqueue = 300;	/* upper bound on queued fragments */
int	ip_frags = 0;		/* current fragment count, under ipq_mutex */

/* sysctl variables that may be read/written without the net lock */
const struct sysctl_bounded_args ipctl_vars_unlocked[] = {
	{ IPCTL_FORWARDING, &ip_forwarding, 0, 2 },
	{ IPCTL_SENDREDIRECTS, &ip_sendredirects, 0, 1 },
	{ IPCTL_DIRECTEDBCAST, &ip_directedbcast, 0, 1 },
};

/* sysctl variables handled with the net lock held */
const struct sysctl_bounded_args ipctl_vars[] = {
#ifdef MROUTING
	{ IPCTL_MRTPROTO, &ip_mrtproto, SYSCTL_INT_READONLY },
#endif
	{ IPCTL_DEFTTL, &ip_defttl, 0, 255 },
	{ IPCTL_IPPORT_FIRSTAUTO, &ipport_firstauto, 0, 65535 },
	{ IPCTL_IPPORT_LASTAUTO, &ipport_lastauto, 0, 65535 },
	{ IPCTL_IPPORT_HIFIRSTAUTO, &ipport_hifirstauto, 0, 65535 },
	{ IPCTL_IPPORT_HILASTAUTO, &ipport_hilastauto, 0, 65535 },
	{ IPCTL_IPPORT_MAXQUEUE, &ip_maxqueue, 0, 10000 },
	{ IPCTL_MFORWARDING, &ipmforwarding, 0, 1 },
	{ IPCTL_ARPTIMEOUT, &arpt_keep, 0, INT_MAX },
	{ IPCTL_ARPDOWN, &arpt_down, 0, INT_MAX },
};

/* queue feeding ipintr() for packets needing exclusive net lock delivery */
struct niqueue ipintrq = NIQUEUE_INITIALIZER(IPQ_MAXLEN, NETISR_IP);

struct pool ipqent_pool;	/* backing store for struct ipqent */
struct pool ipq_pool;		/* backing store for struct ipq */

struct cpumem *ipcounters;	/* per-CPU IP statistics */

int ip_sysctl_ipstat(void *, size_t *, void *);

static struct mbuf_queue	ipsend_mq;
static struct mbuf_queue	ipsendraw_mq;

extern struct niqueue		arpinq;

int	ip_ours(struct mbuf **, int *, int, int);
int	ip_ours_enqueue(struct mbuf **mp, int *offp, int nxt);
int	ip_dooptions(struct mbuf *, struct ifnet *, int);
int	in_ouraddr(struct mbuf *, struct ifnet *, struct route *, int);

int		ip_fragcheck(struct mbuf **, int *);
struct mbuf *	ip_reass(struct ipqent *, struct ipq *);
void		ip_freef(struct ipq *);
void		ip_flush(void);

static void ip_send_dispatch(void *);
static void ip_sendraw_dispatch(void *);
static struct task ipsend_task =
	TASK_INITIALIZER(ip_send_dispatch, &ipsend_mq);
static struct task ipsendraw_task =
	TASK_INITIALIZER(ip_sendraw_dispatch, &ipsendraw_mq);

/*
 * Used to save the IP options in case a protocol wants to respond
 * to an incoming packet over the same route if the packet got here
 * using IP source routing.  This allows connection establishment and
 * maintenance when the remote end is on a network that is not known
 * to us.
 */
struct ip_srcrt {
	int		isr_nhops;		   /* number of hops */
	struct in_addr	isr_dst;		   /* final destination */
	char		isr_nop;		   /* one NOP to align */
	char		isr_hdr[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN & OFFSET */
	struct in_addr	isr_routes[MAX_IPOPTLEN/sizeof(struct in_addr)];
};

void save_rte(struct mbuf *, u_char *, struct in_addr);

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	const struct protosw *pr;
	int i;
	const u_int16_t defbaddynamicports_tcp[] = DEFBADDYNAMICPORTS_TCP;
	const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP;
	const u_int16_t defrootonlyports_tcp[] = DEFROOTONLYPORTS_TCP;
	const u_int16_t defrootonlyports_udp[] = DEFROOTONLYPORTS_UDP;

	ipcounters = counters_alloc(ips_ncounters);

	pool_init(&ipqent_pool, sizeof(struct ipqent), 0,
	    IPL_SOFTNET, 0, "ipqe",  NULL);
	pool_init(&ipq_pool, sizeof(struct ipq), 0,
	    IPL_SOFTNET, 0, "ipq", NULL);

	/* Default all protocol slots to the raw IP handler... */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/* ...then point implemented protocols at their real protosw entry. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW &&
		    pr->pr_protocol < IPPROTO_MAX)
			ip_protox[pr->pr_protocol] = pr - inetsw;
	LIST_INIT(&ipq);

	/* Fill in list of ports not to allocate dynamically. */
	memset(&baddynamicports, 0, sizeof(baddynamicports));
	for (i = 0; defbaddynamicports_tcp[i] != 0; i++)
		DP_SET(baddynamicports.tcp, defbaddynamicports_tcp[i]);
	for (i = 0; defbaddynamicports_udp[i] != 0; i++)
		DP_SET(baddynamicports.udp, defbaddynamicports_udp[i]);

	/* Fill in list of ports only root can bind to. */
	memset(&rootonlyports, 0, sizeof(rootonlyports));
	for (i = 0; defrootonlyports_tcp[i] != 0; i++)
		DP_SET(rootonlyports.tcp, defrootonlyports_tcp[i]);
	for (i = 0; defrootonlyports_udp[i] != 0; i++)
		DP_SET(rootonlyports.udp, defrootonlyports_udp[i]);

	mq_init(&ipsend_mq, 64, IPL_SOFTNET);
	mq_init(&ipsendraw_mq, 64, IPL_SOFTNET);

	arpinit();
#ifdef IPSEC
	ipsec_init();
#endif
#ifdef MROUTING
	rt_timer_queue_init(&ip_mrouterq, MCAST_EXPIRE_FREQUENCY,
	    &mfc_expire_route);
#endif
}

/*
 * Enqueue packet for local delivery.  Queuing is used as a boundary
 * between the network layer (input/forward path) running with
 * NET_LOCK_SHARED() and the transport layer needing it exclusively.
 *
 * Returns the next protocol number to process, or IPPROTO_DONE when the
 * packet has been consumed (delivered, queued, or freed).
 */
int
ip_ours(struct mbuf **mp, int *offp, int nxt, int af)
{
	/* Reassemble first; a fragment may be swallowed into the queue. */
	nxt = ip_fragcheck(mp, offp);
	if (nxt == IPPROTO_DONE)
		return IPPROTO_DONE;

	/* We are already in a IPv4/IPv6 local deliver loop. */
	if (af != AF_UNSPEC)
		return nxt;

	/* Try shared (MP-safe) delivery; shared == 1 here. */
	nxt = ip_deliver(mp, offp, nxt, AF_INET, 1);
	if (nxt == IPPROTO_DONE)
		return IPPROTO_DONE;

	/* Protocol needs the exclusive net lock; hand off to ipintr(). */
	return ip_ours_enqueue(mp, offp, nxt);
}

/*
 * Queue a locally destined packet on ipintrq for ipintr().  When the IP
 * header is not the plain 20 bytes, remember the parsed offset and next
 * protocol in an mbuf tag so they survive the queue.  Always consumes
 * the mbuf (*mp is cleared) and returns IPPROTO_DONE.
 */
int
ip_ours_enqueue(struct mbuf **mp, int *offp, int nxt)
{
	/* save values for later, use after dequeue */
	if (*offp != sizeof(struct ip)) {
		struct m_tag *mtag;
		struct ipoffnxt *ion;

		/* mbuf tags are expensive, but only used for header options */
		mtag = m_tag_get(PACKET_TAG_IP_OFFNXT, sizeof(*ion),
		    M_NOWAIT);
		if (mtag == NULL) {
			ipstat_inc(ips_idropped);
			m_freemp(mp);
			return IPPROTO_DONE;
		}
		ion = (struct ipoffnxt *)(mtag + 1);
		ion->ion_off = *offp;
		ion->ion_nxt = nxt;

		m_tag_prepend(*mp, mtag);
	}

	niq_enqueue(&ipintrq, *mp);
	*mp = NULL;
	return IPPROTO_DONE;
}

/*
 * Dequeue and process locally delivered packets.
 * This is called with exclusive NET_LOCK().
 */
void
ipintr(void)
{
	struct mbuf *m;

	while ((m = niq_dequeue(&ipintrq)) != NULL) {
		struct m_tag *mtag;
		int off, nxt;

#ifdef DIAGNOSTIC
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("ipintr no HDR");
#endif
		/* Recover offset/next-proto saved by ip_ours_enqueue(). */
		mtag = m_tag_find(m, PACKET_TAG_IP_OFFNXT, NULL);
		if (mtag != NULL) {
			struct ipoffnxt *ion;

			ion = (struct ipoffnxt *)(mtag + 1);
			off = ion->ion_off;
			nxt = ion->ion_nxt;

			m_tag_delete(m, mtag);
		} else {
			/* No tag: plain header, re-derive from the packet. */
			struct ip *ip;

			ip = mtod(m, struct ip *);
			off = ip->ip_hl << 2;
			nxt = ip->ip_p;
		}

		/* shared == 0: we hold the exclusive net lock. */
		nxt = ip_deliver(&m, &off, nxt, AF_INET, 0);
		KASSERT(nxt == IPPROTO_DONE);
	}
}

/*
 * IPv4 input routine.
 *
 * Checksum and byte swap header.  Process options.  Forward or deliver.
 */
void
ipv4_input(struct ifnet *ifp, struct mbuf *m)
{
	int off, nxt;

	off = 0;
	nxt = ip_input_if(&m, &off, IPPROTO_IPV4, AF_UNSPEC, ifp);
	KASSERT(nxt == IPPROTO_DONE);
}

/*
 * Sanity check an incoming IPv4 packet: version, header length,
 * loopback-net spoofing (RFC 1122), header checksum and total length.
 * Trims mbuf data that extends past ip_len.  Returns the (possibly
 * reallocated) mbuf, or NULL with the mbuf freed and a counter bumped
 * on any error.
 */
struct mbuf *
ipv4_check(struct ifnet *ifp, struct mbuf *m)
{
	struct ip *ip;
	int hlen, len;

	/* Make the fixed header contiguous before touching it. */
	if (m->m_len < sizeof(*ip)) {
		m = m_pullup(m, sizeof(*ip));
		if (m == NULL) {
			ipstat_inc(ips_toosmall);
			return (NULL);
		}
	}

	ip = mtod(m, struct ip *);
	if (ip->ip_v != IPVERSION) {
		ipstat_inc(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(*ip)) {	/* minimum header length */
		ipstat_inc(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		/* Header options present; pull them up too. */
		m = m_pullup(m, hlen);
		if (m == NULL) {
			ipstat_inc(ips_badhlen);
			return (NULL);
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			ipstat_inc(ips_badaddr);
			goto bad;
		}
	}

	/* Verify checksum in software only if hardware has not done it. */
	if (!ISSET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_OK)) {
		if (ISSET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_BAD)) {
			ipstat_inc(ips_badsum);
			goto bad;
		}

		ipstat_inc(ips_inswcsum);
		if (in_cksum(m, hlen) != 0) {
			ipstat_inc(ips_badsum);
			goto bad;
		}

		SET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_OK);
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Convert fields to host representation.
	 */
	if (len < hlen) {
		ipstat_inc(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat_inc(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = len;
			m->m_pkthdr.len = len;
		} else
			m_adj(m, len - m->m_pkthdr.len);
	}

	return (m);
bad:
	m_freem(m);
	return (NULL);
}

/*
 * Per-interface IPv4 input: validate the packet, run it through carp
 * and pf, process options, then decide between local delivery,
 * multicast handling, and forwarding.  Returns the next protocol to
 * process or IPPROTO_DONE when the packet has been consumed.
 */
int
ip_input_if(struct mbuf **mp, int *offp, int nxt, int af, struct ifnet *ifp)
{
	struct route ro;
	struct mbuf *m;
	struct ip *ip;
	int hlen;
#if NPF > 0
	struct in_addr odst;
#endif
	int flags = 0;

	KASSERT(*offp == 0);

	ro.ro_rt = NULL;
	ipstat_inc(ips_total);
	m = *mp = ipv4_check(ifp, *mp);
	if (m == NULL)
		goto bad;

	ip = mtod(m, struct ip *);

#if NCARP > 0
	if (carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr,
	    &ip->ip_dst.s_addr, (ip->ip_p == IPPROTO_ICMP ? 0 : 1)))
		goto bad;
#endif

#if NPF > 0
	/*
	 * Packet filter
	 */
	odst = ip->ip_dst;
	if (pf_test(AF_INET, PF_IN, ifp, mp) != PF_PASS)
		goto bad;
	m = *mp;
	if (m == NULL)
		goto bad;

	ip = mtod(m, struct ip *);
	/* pf rewrote the destination; remember for redirect suppression. */
	if (odst.s_addr != ip->ip_dst.s_addr)
		SET(flags, IP_REDIRECT);
#endif

	/* Snapshot sysctl state into per-packet flags. */
	switch (atomic_load_int(&ip_forwarding)) {
	case 2:
		SET(flags, IP_FORWARDING_IPSEC);
		/* FALLTHROUGH */
	case 1:
		SET(flags, IP_FORWARDING);
		break;
	}
	if (atomic_load_int(&ip_directedbcast))
		SET(flags, IP_ALLOWBROADCAST);

	hlen = ip->ip_hl << 2;

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, ifp, flags)) {
		m = *mp = NULL;
		goto bad;
	}

	if (ip->ip_dst.s_addr == INADDR_BROADCAST ||
	    ip->ip_dst.s_addr == INADDR_ANY) {
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

	/* in_ouraddr(): 0 = not ours, 1 = ours, 2 = drop (wrong interface) */
	switch(in_ouraddr(m, ifp, &ro, flags)) {
	case 2:
		goto bad;
	case 1:
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

	if (IN_MULTICAST(ip->ip_dst.s_addr)) {
		/*
		 * Make sure M_MCAST is set.  It should theoretically
		 * already be there, but let's play safe because upper
		 * layers check for this flag.
		 */
		m->m_flags |= M_MCAST;

#ifdef MROUTING
		if (ipmforwarding && ip_mrouter[ifp->if_rdomain]) {
			int error;

			if (m->m_flags & M_EXT) {
				if ((m = *mp = m_pullup(m, hlen)) == NULL) {
					ipstat_inc(ips_toosmall);
					goto bad;
				}
				ip = mtod(m, struct ip *);
			}
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 *
			 * (The IP ident field is put in the same byte order
			 * as expected when ip_mforward() is called from
			 * ip_output().)
			 */
			KERNEL_LOCK();
			error = ip_mforward(m, ifp, flags);
			KERNEL_UNLOCK();
			if (error) {
				ipstat_inc(ips_cantforward);
				goto bad;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP) {
				nxt = ip_ours(mp, offp, nxt, af);
				goto out;
			}
			ipstat_inc(ips_forward);
		}
#endif
		/*
		 * See if we belong to the destination multicast group on the
		 * arrival interface.
		 */
		if (!in_hasmulti(&ip->ip_dst, ifp)) {
			ipstat_inc(ips_notmember);
			if (!IN_LOCAL_GROUP(ip->ip_dst.s_addr))
				ipstat_inc(ips_cantforward);
			goto bad;
		}
		nxt = ip_ours(mp, offp, nxt, af);
		goto out;
	}

#if NCARP > 0
	if (ip->ip_p == IPPROTO_ICMP &&
	    carp_lsdrop(ifp, m, AF_INET, &ip->ip_src.s_addr,
	    &ip->ip_dst.s_addr, 1))
		goto bad;
#endif
	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (!ISSET(flags, IP_FORWARDING)) {
		ipstat_inc(ips_cantforward);
		goto bad;
	}
#ifdef IPSEC
	if (ipsec_in_use) {
		int rv;

		rv = ipsec_forward_check(m, hlen, AF_INET);
		if (rv != 0) {
			ipstat_inc(ips_cantforward);
			goto bad;
		}
		/*
		 * Fall through, forward packet. Outbound IPsec policy
		 * checking will occur in ip_output().
		 */
	}
#endif /* IPSEC */

	/* ip_forward() takes ownership of the mbuf. */
	ip_forward(m, ifp, &ro, flags);
	*mp = NULL;
	rtfree(ro.ro_rt);
	return IPPROTO_DONE;
bad:
	nxt = IPPROTO_DONE;
	m_freemp(mp);
out:
	rtfree(ro.ro_rt);
	return nxt;
}

/*
 * If the packet is a fragment, feed it into the reassembly queue.
 * On return *offp holds the header length.  Returns the datagram's
 * protocol once a complete packet is available, or IPPROTO_DONE when
 * the fragment was queued, dropped, or reassembly failed.
 */
int
ip_fragcheck(struct mbuf **mp, int *offp)
{
	struct ip *ip;
	struct ipq *fp;
	struct ipqent *ipqe;
	int hlen;
	uint16_t mff;

	ip = mtod(*mp, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * If offset or more fragments are set, must reassemble.
	 * Otherwise, nothing need be done.
	 * (We could look in the reassembly queue to see
	 * if the packet was previously fragmented,
	 * but it's not worth the time; just let them time out.)
	 */
	if (ISSET(ip->ip_off, htons(IP_OFFMASK | IP_MF))) {
		if ((*mp)->m_flags & M_EXT) {		/* XXX */
			if ((*mp = m_pullup(*mp, hlen)) == NULL) {
				ipstat_inc(ips_toosmall);
				return IPPROTO_DONE;
			}
			ip = mtod(*mp, struct ip *);
		}

		/*
		 * Adjust ip_len to not reflect header,
		 * set ipqe_mff if more fragments are expected,
		 * convert offset of this to bytes.
		 */
		ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
		mff = ISSET(ip->ip_off, htons(IP_MF));
		if (mff) {
			/*
			 * Make sure that fragments have a data length
			 * that's a non-zero multiple of 8 bytes.
			 */
			if (ntohs(ip->ip_len) == 0 ||
			    (ntohs(ip->ip_len) & 0x7) != 0) {
				ipstat_inc(ips_badfrags);
				m_freemp(mp);
				return IPPROTO_DONE;
			}
		}
		ip->ip_off = htons(ntohs(ip->ip_off) << 3);

		mtx_enter(&ipq_mutex);

		/*
		 * Look for queue of fragments
		 * of this datagram.
		 */
		LIST_FOREACH(fp, &ipq, ipq_q) {
			if (ip->ip_id == fp->ipq_id &&
			    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
			    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
			    ip->ip_p == fp->ipq_p)
				break;
		}

		/*
		 * If datagram marked as having more fragments
		 * or if this is not the first fragment,
		 * attempt reassembly; if it succeeds, proceed.
		 */
		if (mff || ip->ip_off) {
			ipstat_inc(ips_fragments);
			if (ip_frags + 1 > ip_maxqueue) {
				ip_flush();
				ipstat_inc(ips_rcvmemdrop);
				goto bad;
			}

			ipqe = pool_get(&ipqent_pool, PR_NOWAIT);
			if (ipqe == NULL) {
				ipstat_inc(ips_rcvmemdrop);
				goto bad;
			}
			ip_frags++;
			ipqe->ipqe_mff = mff;
			ipqe->ipqe_m = *mp;
			ipqe->ipqe_ip = ip;
			/* ip_reass() consumes ipqe; NULL means incomplete. */
			*mp = ip_reass(ipqe, fp);
			if (*mp == NULL)
				goto bad;
			ipstat_inc(ips_reassembled);
			ip = mtod(*mp, struct ip *);
			hlen = ip->ip_hl << 2;
			ip->ip_len = htons(ntohs(ip->ip_len) + hlen);
		} else {
			/* Unfragmented datagram; drop any stale queue. */
			if (fp != NULL)
				ip_freef(fp);
		}

		mtx_leave(&ipq_mutex);
	}

	*offp = hlen;
	return ip->ip_p;

bad:
	mtx_leave(&ipq_mutex);
	m_freemp(mp);
	return IPPROTO_DONE;
}

#ifndef INET6
#define IPSTAT_INC(name)	ipstat_inc(ips_##name)
#else
#define IPSTAT_INC(name)	(af == AF_INET ?	\
	ipstat_inc(ips_##name) : ip6stat_inc(ip6s_##name))
#endif

/*
 * Walk the protocol header chain and hand the packet to each protocol's
 * pr_input routine.  With `shared' set (NET_LOCK_SHARED() held) only
 * PR_MPINPUT protocols are delivered directly; others are re-queued for
 * ipintr()/ip6intr().  Returns IPPROTO_DONE when the mbuf is consumed.
 */
int
ip_deliver(struct mbuf **mp, int *offp, int nxt, int af, int shared)
{
#ifdef INET6
	int nest = 0;
#endif

	/*
	 * Tell launch routine the next header
	 */
	IPSTAT_INC(delivered);

	while (nxt != IPPROTO_DONE) {
		const struct protosw *psw;
		int naf;

		switch (af) {
		case AF_INET:
			psw = &inetsw[ip_protox[nxt]];
			break;
#ifdef INET6
		case AF_INET6:
			psw = &inet6sw[ip6_protox[nxt]];
			break;
#endif
		}
		if (shared && !ISSET(psw->pr_flags, PR_MPINPUT)) {
			/* delivery not finished, decrement counter, queue */
			switch (af) {
			case AF_INET:
				counters_dec(ipcounters, ips_delivered);
				return ip_ours_enqueue(mp, offp, nxt);
#ifdef INET6
			case AF_INET6:
				counters_dec(ip6counters, ip6s_delivered);
				return ip6_ours_enqueue(mp, offp, nxt);
#endif
			}
			break;
		}

#ifdef INET6
		/* Bound IPv6 extension header recursion. */
		if (af == AF_INET6 &&
		    ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
			ip6stat_inc(ip6s_toomanyhdr);
			goto bad;
		}
#endif

		/*
		 * protection against faulty packet - there should be
		 * more sanity checks in header chain processing.
		 */
		if ((*mp)->m_pkthdr.len < *offp) {
			IPSTAT_INC(tooshort);
			goto bad;
		}

#ifdef IPSEC
		if (ipsec_in_use) {
			if (ipsec_local_check(*mp, *offp, nxt, af) != 0) {
				IPSTAT_INC(cantforward);
				goto bad;
			}
		}
		/* Otherwise, just fall through and deliver the packet */
#endif

		/* Track address family changes across IPIP/IPv6 tunnels. */
		switch (nxt) {
		case IPPROTO_IPV4:
			naf = AF_INET;
			ipstat_inc(ips_delivered);
			break;
#ifdef INET6
		case IPPROTO_IPV6:
			naf = AF_INET6;
			ip6stat_inc(ip6s_delivered);
			break;
#endif
		default:
			naf = af;
			break;
		}
		nxt = (*psw->pr_input)(mp, offp, nxt, af);
		af = naf;
	}
	return nxt;
bad:
	m_freemp(mp);
	return IPPROTO_DONE;
}
#undef IPSTAT_INC

/*
 * Decide whether a packet is addressed to this host.
 * Returns 0 if not ours, 1 if ours (local or acceptable broadcast),
 * 2 if it must be dropped because it arrived on the wrong interface.
 */
int
in_ouraddr(struct mbuf *m, struct ifnet *ifp, struct route *ro, int flags)
{
	struct rtentry *rt;
	struct ip *ip;
	int match = 0;

#if NPF > 0
	switch (pf_ouraddr(m)) {
	case 0:
		return (0);
	case 1:
		return (1);
	default:
		/* pf does not know it */
		break;
	}
#endif

	ip = mtod(m, struct ip *);

	rt = route_mpath(ro, &ip->ip_dst, &ip->ip_src, m->m_pkthdr.ph_rtableid);
	if (rt != NULL) {
		if (ISSET(rt->rt_flags, RTF_LOCAL))
			match = 1;

		/*
		 * If directedbcast is enabled we only consider it local
		 * if it is received on the interface with that address.
		 */
		if (ISSET(rt->rt_flags, RTF_BROADCAST) &&
		    (!ISSET(flags, IP_ALLOWBROADCAST) ||
		    rt->rt_ifidx == ifp->if_index)) {
			match = 1;

			/* Make sure M_BCAST is set */
			m->m_flags |= M_BCAST;
		}
	}

	if (!match) {
		struct ifaddr *ifa;

		/*
		 * No local address or broadcast address found, so check for
		 * ancient classful broadcast addresses.
		 * It must have been broadcast on the link layer, and for an
		 * address on the interface it was received on.
		 */
		if (!ISSET(m->m_flags, M_BCAST) ||
		    !IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, ip->ip_dst.s_addr))
			return (0);

		if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid))
			return (0);
		/*
		 * The check in the loop assumes you only rx a packet on an UP
		 * interface, and that M_BCAST will only be set on a BROADCAST
		 * interface.
		 */
		NET_ASSERT_LOCKED();
		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;

			if (IN_CLASSFULBROADCAST(ip->ip_dst.s_addr,
			    ifatoia(ifa)->ia_addr.sin_addr.s_addr)) {
				match = 1;
				break;
			}
		}
	} else if (!ISSET(flags, IP_FORWARDING) &&
	    rt->rt_ifidx != ifp->if_index &&
	    !((ifp->if_flags & IFF_LOOPBACK) || (ifp->if_type == IFT_ENC) ||
	    (m->m_pkthdr.pf.flags & PF_TAG_TRANSLATE_LOCALHOST))) {
		/* received on wrong interface. */
#if NCARP > 0
		struct ifnet *out_if;

		/*
		 * Virtual IPs on carp interfaces need to be checked also
		 * against the parent interface and other carp interfaces
		 * sharing the same parent.
		 */
		out_if = if_get(rt->rt_ifidx);
		if (!(out_if && carp_strict_addr_chk(out_if, ifp))) {
			ipstat_inc(ips_wrongif);
			match = 2;
		}
		if_put(out_if);
#else
		ipstat_inc(ips_wrongif);
		match = 2;
#endif
	}

	return (match);
}

/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram.  If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 *
 * Called with ipq_mutex held.  Consumes ipqe/mbuf on all paths;
 * returns the reassembled packet, or NULL if still incomplete
 * or the fragment was dropped.
 */
struct mbuf *
ip_reass(struct ipqent *ipqe, struct ipq *fp)
{
	struct mbuf *m = ipqe->ipqe_m;
	struct ipqent *nq, *p, *q;
	struct ip *ip;
	struct mbuf *t;
	int hlen = ipqe->ipqe_ip->ip_hl << 2;
	int i, next;
	u_int8_t ecn, ecn0;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = pool_get(&ipq_pool, PR_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
		LIST_INSERT_HEAD(&ipq, fp, ipq_q);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ipqe->ipqe_ip->ip_p;
		fp->ipq_id = ipqe->ipqe_ip->ip_id;
		LIST_INIT(&fp->ipq_fragq);
		fp->ipq_src = ipqe->ipqe_ip->ip_src;
		fp->ipq_dst = ipqe->ipqe_ip->ip_dst;
		p = NULL;
		goto insert;
	}

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ipqe->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos |=
			    IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
	    p = q, q = LIST_NEXT(q, ipqe_q))
		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
		    ntohs(ipqe->ipqe_ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ipqe->ipqe_ip->ip_len))
				goto dropfrag;
			m_adj(ipqe->ipqe_m, i);
			ipqe->ipqe_ip->ip_off =
			    htons(ntohs(ipqe->ipqe_ip->ip_off) + i);
			ipqe->ipqe_ip->ip_len =
			    htons(ntohs(ipqe->ipqe_ip->ip_len) - i);
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL &&
	    ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) >
	    ntohs(q->ipqe_ip->ip_off); q = nq) {
		i = (ntohs(ipqe->ipqe_ip->ip_off) +
		    ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
		if (i < ntohs(q->ipqe_ip->ip_len)) {
			q->ipqe_ip->ip_len =
			    htons(ntohs(q->ipqe_ip->ip_len) - i);
			q->ipqe_ip->ip_off =
			    htons(ntohs(q->ipqe_ip->ip_off) + i);
			m_adj(q->ipqe_m, i);
			break;
		}
		nq = LIST_NEXT(q, ipqe_q);
		m_freem(q->ipqe_m);
		LIST_REMOVE(q, ipqe_q);
		pool_put(&ipqent_pool, q);
		ip_frags--;
	}

insert:
	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 */
	if (p == NULL) {
		LIST_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
	} else {
		LIST_INSERT_AFTER(p, ipqe, ipqe_q);
	}
	next = 0;
	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
	    p = q, q = LIST_NEXT(q, ipqe_q)) {
		if (ntohs(q->ipqe_ip->ip_off) != next)
			return (0);
		next += ntohs(q->ipqe_ip->ip_len);
	}
	/* Last fragment still expects more; not complete yet. */
	if (p->ipqe_mff)
		return (0);

	/*
	 * Reassembly is complete.  Check for a bogus message size and
	 * concatenate fragments.
	 */
	q = LIST_FIRST(&fp->ipq_fragq);
	ip = q->ipqe_ip;
	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
		ipstat_inc(ips_toolong);
		ip_freef(fp);
		return (0);
	}
	m = q->ipqe_m;
	t = m->m_next;
	m->m_next = 0;
	m_cat(m, t);
	nq = LIST_NEXT(q, ipqe_q);
	pool_put(&ipqent_pool, q);
	ip_frags--;
	for (q = nq; q != NULL; q = nq) {
		t = q->ipqe_m;
		nq = LIST_NEXT(q, ipqe_q);
		pool_put(&ipqent_pool, q);
		ip_frags--;
		m_removehdr(t);
		m_cat(m, t);
	}

	/*
	 * Create header for new ip packet by
	 * modifying header of first packet;
	 * dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons(next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	LIST_REMOVE(fp, ipq_q);
	pool_put(&ipq_pool, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	m_calchdrlen(m);
	return (m);

dropfrag:
	ipstat_inc(ips_fragdropped);
	m_freem(m);
	pool_put(&ipqent_pool, ipqe);
	ip_frags--;
	return (NULL);
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.  Called with ipq_mutex held.
 */
void
ip_freef(struct ipq *fp)
{
	struct ipqent *q;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	while ((q = LIST_FIRST(&fp->ipq_fragq)) != NULL) {
		LIST_REMOVE(q, ipqe_q);
		m_freem(q->ipqe_m);
		pool_put(&ipqent_pool, q);
		ip_frags--;
	}
	LIST_REMOVE(fp, ipq_q);
	pool_put(&ipq_pool, fp);
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
void
ip_slowtimo(void)
{
	struct ipq *fp, *nfp;

	mtx_enter(&ipq_mutex);
	LIST_FOREACH_SAFE(fp, &ipq, ipq_q, nfp) {
		if (--fp->ipq_ttl == 0) {
			ipstat_inc(ips_fragtimeout);
			ip_freef(fp);
		}
	}
	mtx_leave(&ipq_mutex);
}

/*
 * Flush a bunch of datagram fragments, till we are down to 75%.
 * Called with ipq_mutex held; bounded to at most 50 queues per call.
 */
void
ip_flush(void)
{
	int max = 50;

	MUTEX_ASSERT_LOCKED(&ipq_mutex);

	while (!LIST_EMPTY(&ipq) && ip_frags > ip_maxqueue * 3 / 4 && --max) {
		ipstat_inc(ips_fragdropped);
		ip_freef(LIST_FIRST(&ipq));
	}
}

/*
 * Do option processing on a datagram,
 * possibly discarding it if bad options are encountered,
 * or forwarding it if source-routed.
 * Returns 1 if packet has been forwarded/freed,
 * 0 if the packet should be processed further.
 */
int
ip_dooptions(struct mbuf *m, struct ifnet *ifp, int flags)
{
	struct ip *ip = mtod(m, struct ip *);
	unsigned int rtableid = m->m_pkthdr.ph_rtableid;
	struct rtentry *rt;
	struct sockaddr_in ipaddr;
	u_char *cp;
	struct ip_timestamp ipt;
	struct in_ifaddr *ia;
	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
	struct in_addr sin, dst;
	u_int32_t ntime;

	dst = ip->ip_dst;
	cp = (u_char *)(ip + 1);
	cnt = (ip->ip_hl << 2) - sizeof (struct ip);

	KERNEL_LOCK();
	/* Walk the options area; `code' reports the bad byte's offset. */
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
		}

		switch (opt) {

		default:
			break;

		/*
1226 * Source routing with record. 1227 * Find interface with current destination address. 1228 * If none on this machine then drop if strictly routed, 1229 * or do nothing if loosely routed. 1230 * Record interface address and bring up next address 1231 * component. If strictly routed make sure next 1232 * address is on directly accessible net. 1233 */ 1234 case IPOPT_LSRR: 1235 case IPOPT_SSRR: 1236 if (!ip_dosourceroute) { 1237 type = ICMP_UNREACH; 1238 code = ICMP_UNREACH_SRCFAIL; 1239 goto bad; 1240 } 1241 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1242 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1243 goto bad; 1244 } 1245 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1246 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1247 goto bad; 1248 } 1249 memset(&ipaddr, 0, sizeof(ipaddr)); 1250 ipaddr.sin_family = AF_INET; 1251 ipaddr.sin_len = sizeof(ipaddr); 1252 ipaddr.sin_addr = ip->ip_dst; 1253 ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr), 1254 m->m_pkthdr.ph_rtableid)); 1255 if (ia == NULL) { 1256 if (opt == IPOPT_SSRR) { 1257 type = ICMP_UNREACH; 1258 code = ICMP_UNREACH_SRCFAIL; 1259 goto bad; 1260 } 1261 /* 1262 * Loose routing, and not at next destination 1263 * yet; nothing to do except forward. 1264 */ 1265 break; 1266 } 1267 off--; /* 0 origin */ 1268 if ((off + sizeof(struct in_addr)) > optlen) { 1269 /* 1270 * End of source route. Should be for us. 
1271 */ 1272 save_rte(m, cp, ip->ip_src); 1273 break; 1274 } 1275 1276 /* 1277 * locate outgoing interface 1278 */ 1279 memset(&ipaddr, 0, sizeof(ipaddr)); 1280 ipaddr.sin_family = AF_INET; 1281 ipaddr.sin_len = sizeof(ipaddr); 1282 memcpy(&ipaddr.sin_addr, cp + off, 1283 sizeof(ipaddr.sin_addr)); 1284 /* keep packet in the virtual instance */ 1285 rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid); 1286 if (!rtisvalid(rt) || ((opt == IPOPT_SSRR) && 1287 ISSET(rt->rt_flags, RTF_GATEWAY))) { 1288 type = ICMP_UNREACH; 1289 code = ICMP_UNREACH_SRCFAIL; 1290 rtfree(rt); 1291 goto bad; 1292 } 1293 ia = ifatoia(rt->rt_ifa); 1294 memcpy(cp + off, &ia->ia_addr.sin_addr, 1295 sizeof(struct in_addr)); 1296 rtfree(rt); 1297 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1298 ip->ip_dst = ipaddr.sin_addr; 1299 /* 1300 * Let ip_intr's mcast routing check handle mcast pkts 1301 */ 1302 forward = !IN_MULTICAST(ip->ip_dst.s_addr); 1303 break; 1304 1305 case IPOPT_RR: 1306 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1307 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1308 goto bad; 1309 } 1310 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1311 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1312 goto bad; 1313 } 1314 1315 /* 1316 * If no space remains, ignore. 1317 */ 1318 off--; /* 0 origin */ 1319 if ((off + sizeof(struct in_addr)) > optlen) 1320 break; 1321 memset(&ipaddr, 0, sizeof(ipaddr)); 1322 ipaddr.sin_family = AF_INET; 1323 ipaddr.sin_len = sizeof(ipaddr); 1324 ipaddr.sin_addr = ip->ip_dst; 1325 /* 1326 * locate outgoing interface; if we're the destination, 1327 * use the incoming interface (should be same). 1328 * Again keep the packet inside the virtual instance. 
1329 */ 1330 rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid); 1331 if (!rtisvalid(rt)) { 1332 type = ICMP_UNREACH; 1333 code = ICMP_UNREACH_HOST; 1334 rtfree(rt); 1335 goto bad; 1336 } 1337 ia = ifatoia(rt->rt_ifa); 1338 memcpy(cp + off, &ia->ia_addr.sin_addr, 1339 sizeof(struct in_addr)); 1340 rtfree(rt); 1341 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1342 break; 1343 1344 case IPOPT_TS: 1345 code = cp - (u_char *)ip; 1346 if (optlen < sizeof(struct ip_timestamp)) 1347 goto bad; 1348 memcpy(&ipt, cp, sizeof(struct ip_timestamp)); 1349 if (ipt.ipt_ptr < 5 || ipt.ipt_len < 5) 1350 goto bad; 1351 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) > ipt.ipt_len) { 1352 if (++ipt.ipt_oflw == 0) 1353 goto bad; 1354 break; 1355 } 1356 memcpy(&sin, cp + ipt.ipt_ptr - 1, sizeof sin); 1357 switch (ipt.ipt_flg) { 1358 1359 case IPOPT_TS_TSONLY: 1360 break; 1361 1362 case IPOPT_TS_TSANDADDR: 1363 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) + 1364 sizeof(struct in_addr) > ipt.ipt_len) 1365 goto bad; 1366 memset(&ipaddr, 0, sizeof(ipaddr)); 1367 ipaddr.sin_family = AF_INET; 1368 ipaddr.sin_len = sizeof(ipaddr); 1369 ipaddr.sin_addr = dst; 1370 ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr), 1371 ifp)); 1372 if (ia == NULL) 1373 continue; 1374 memcpy(&sin, &ia->ia_addr.sin_addr, 1375 sizeof(struct in_addr)); 1376 ipt.ipt_ptr += sizeof(struct in_addr); 1377 break; 1378 1379 case IPOPT_TS_PRESPEC: 1380 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) + 1381 sizeof(struct in_addr) > ipt.ipt_len) 1382 goto bad; 1383 memset(&ipaddr, 0, sizeof(ipaddr)); 1384 ipaddr.sin_family = AF_INET; 1385 ipaddr.sin_len = sizeof(ipaddr); 1386 ipaddr.sin_addr = sin; 1387 if (ifa_ifwithaddr(sintosa(&ipaddr), 1388 m->m_pkthdr.ph_rtableid) == NULL) 1389 continue; 1390 ipt.ipt_ptr += sizeof(struct in_addr); 1391 break; 1392 1393 default: 1394 /* XXX can't take &ipt->ipt_flg */ 1395 code = (u_char *)&ipt.ipt_ptr - 1396 (u_char *)ip + 1; 1397 goto bad; 1398 } 1399 ntime = iptime(); 1400 memcpy(cp + ipt.ipt_ptr - 1, 
&ntime, sizeof(u_int32_t)); 1401 ipt.ipt_ptr += sizeof(u_int32_t); 1402 } 1403 } 1404 KERNEL_UNLOCK(); 1405 if (forward && ISSET(flags, IP_FORWARDING)) { 1406 ip_forward(m, ifp, NULL, flags | IP_REDIRECT); 1407 return (1); 1408 } 1409 return (0); 1410 bad: 1411 KERNEL_UNLOCK(); 1412 icmp_error(m, type, code, 0, 0); 1413 ipstat_inc(ips_badoptions); 1414 return (1); 1415 } 1416 1417 /* 1418 * Save incoming source route for use in replies, 1419 * to be picked up later by ip_srcroute if the receiver is interested. 1420 */ 1421 void 1422 save_rte(struct mbuf *m, u_char *option, struct in_addr dst) 1423 { 1424 struct ip_srcrt *isr; 1425 struct m_tag *mtag; 1426 unsigned olen; 1427 1428 olen = option[IPOPT_OLEN]; 1429 if (olen > sizeof(isr->isr_hdr) + sizeof(isr->isr_routes)) 1430 return; 1431 1432 mtag = m_tag_get(PACKET_TAG_SRCROUTE, sizeof(*isr), M_NOWAIT); 1433 if (mtag == NULL) { 1434 ipstat_inc(ips_idropped); 1435 return; 1436 } 1437 isr = (struct ip_srcrt *)(mtag + 1); 1438 1439 memcpy(isr->isr_hdr, option, olen); 1440 isr->isr_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); 1441 isr->isr_dst = dst; 1442 m_tag_prepend(m, mtag); 1443 } 1444 1445 /* 1446 * Retrieve incoming source route for use in replies, 1447 * in the same form used by setsockopt. 1448 * The first hop is placed before the options, will be removed later. 
1449 */ 1450 struct mbuf * 1451 ip_srcroute(struct mbuf *m0) 1452 { 1453 struct in_addr *p, *q; 1454 struct mbuf *m; 1455 struct ip_srcrt *isr; 1456 struct m_tag *mtag; 1457 1458 if (!ip_dosourceroute) 1459 return (NULL); 1460 1461 mtag = m_tag_find(m0, PACKET_TAG_SRCROUTE, NULL); 1462 if (mtag == NULL) 1463 return (NULL); 1464 isr = (struct ip_srcrt *)(mtag + 1); 1465 1466 if (isr->isr_nhops == 0) 1467 return (NULL); 1468 m = m_get(M_DONTWAIT, MT_SOOPTS); 1469 if (m == NULL) { 1470 ipstat_inc(ips_idropped); 1471 return (NULL); 1472 } 1473 1474 #define OPTSIZ (sizeof(isr->isr_nop) + sizeof(isr->isr_hdr)) 1475 1476 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + header) */ 1477 m->m_len = (isr->isr_nhops + 1) * sizeof(struct in_addr) + OPTSIZ; 1478 1479 /* 1480 * First save first hop for return route 1481 */ 1482 p = &(isr->isr_routes[isr->isr_nhops - 1]); 1483 *(mtod(m, struct in_addr *)) = *p--; 1484 1485 /* 1486 * Copy option fields and padding (nop) to mbuf. 1487 */ 1488 isr->isr_nop = IPOPT_NOP; 1489 isr->isr_hdr[IPOPT_OFFSET] = IPOPT_MINOFF; 1490 memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &isr->isr_nop, 1491 OPTSIZ); 1492 q = (struct in_addr *)(mtod(m, caddr_t) + 1493 sizeof(struct in_addr) + OPTSIZ); 1494 #undef OPTSIZ 1495 /* 1496 * Record return path as an IP source route, 1497 * reversing the path (pointers are now aligned). 1498 */ 1499 while (p >= isr->isr_routes) { 1500 *q++ = *p--; 1501 } 1502 /* 1503 * Last hop goes to final destination. 1504 */ 1505 *q = isr->isr_dst; 1506 m_tag_delete(m0, (struct m_tag *)isr); 1507 return (m); 1508 } 1509 1510 /* 1511 * Strip out IP options, at higher level protocol in the kernel. 
1512 */ 1513 void 1514 ip_stripoptions(struct mbuf *m) 1515 { 1516 int i; 1517 struct ip *ip = mtod(m, struct ip *); 1518 caddr_t opts; 1519 int olen; 1520 1521 olen = (ip->ip_hl<<2) - sizeof (struct ip); 1522 opts = (caddr_t)(ip + 1); 1523 i = m->m_len - (sizeof (struct ip) + olen); 1524 memmove(opts, opts + olen, i); 1525 m->m_len -= olen; 1526 if (m->m_flags & M_PKTHDR) 1527 m->m_pkthdr.len -= olen; 1528 ip->ip_hl = sizeof(struct ip) >> 2; 1529 ip->ip_len = htons(ntohs(ip->ip_len) - olen); 1530 } 1531 1532 const u_char inetctlerrmap[PRC_NCMDS] = { 1533 0, 0, 0, 0, 1534 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, 1535 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, 1536 EMSGSIZE, EHOSTUNREACH, 0, 0, 1537 0, 0, 0, 0, 1538 ENOPROTOOPT 1539 }; 1540 1541 /* 1542 * Forward a packet. If some error occurs return the sender 1543 * an icmp packet. Note we can't always generate a meaningful 1544 * icmp message because icmp doesn't have a large enough repertoire 1545 * of codes and types. 1546 * 1547 * If not forwarding, just drop the packet. This could be confusing 1548 * if ip_forwarding was zero but some routing protocol was advancing 1549 * us as a gateway to somewhere. However, we must let the routing 1550 * protocol deal with that. 1551 * 1552 * The srcrt parameter indicates whether the packet is being forwarded 1553 * via a source route. 
 */
void
ip_forward(struct mbuf *m, struct ifnet *ifp, struct route *ro, int flags)
{
	struct ip *ip = mtod(m, struct ip *);
	struct route iproute;
	struct rtentry *rt;
	u_int rtableid = m->m_pkthdr.ph_rtableid;
	u_int8_t loopcnt = m->m_pkthdr.ph_loopcnt;
	u_int icmp_len;
	char icmp_buf[68];
	CTASSERT(sizeof(icmp_buf) <= MHLEN);
	u_short mflags, pfflags;
	struct mbuf *mcopy;
	int error = 0, type = 0, code = 0, destmtu = 0;
	u_int32_t dest;

	dest = 0;
	/* Never forward broadcast/multicast or unforwardable destinations. */
	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		ipstat_inc(ips_cantforward);
		m_freem(m);
		goto done;
	}
	/* TTL exhausted: notify the sender; icmp_error() consumes m. */
	if (ip->ip_ttl <= IPTTLDEC) {
		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
		goto done;
	}

	if (ro == NULL) {
		ro = &iproute;
		ro->ro_rt = NULL;
	}
	rt = route_mpath(ro, &ip->ip_dst, &ip->ip_src, rtableid);
	if (rt == NULL) {
		ipstat_inc(ips_noroute);
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
		goto done;
	}

	/*
	 * Save at most 68 bytes of the packet in case we need to generate
	 * an ICMP message to the src.  The data is saved on the stack.
	 * A new mbuf is only allocated when ICMP is actually created.
	 */
	icmp_len = min(sizeof(icmp_buf), ntohs(ip->ip_len));
	mflags = m->m_flags;
	pfflags = m->m_pkthdr.pf.flags;
	m_copydata(m, 0, icmp_len, icmp_buf);

	ip->ip_ttl -= IPTTLDEC;

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 * Don't send redirect if we advertise destination's arp address
	 * as ours (proxy arp).
	 */
	if (rt->rt_ifidx == ifp->if_index &&
	    !ISSET(rt->rt_flags, RTF_DYNAMIC|RTF_MODIFIED) &&
	    satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
	    !ISSET(flags, IP_REDIRECT) &&
	    atomic_load_int(&ip_sendredirects) &&
	    !arpproxy(satosin(rt_key(rt))->sin_addr, rtableid)) {
		/* source must be on the directly attached network */
		if ((ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_netmask) ==
		    ifatoia(rt->rt_ifa)->ia_net) {
			if (rt->rt_flags & RTF_GATEWAY)
				dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
			else
				dest = ip->ip_dst.s_addr;
			/* Router requirements says to only send host redirects */
			type = ICMP_REDIRECT;
			code = ICMP_REDIRECT_HOST;
		}
	}

	/* ip_output() consumes m on both success and failure. */
	error = ip_output(m, NULL, ro, flags | IP_FORWARDING, NULL, NULL, 0);
	rt = ro->ro_rt;
	if (error)
		ipstat_inc(ips_cantforward);
	else {
		ipstat_inc(ips_forward);
		if (type)
			ipstat_inc(ips_redirectsent);
		else
			goto done;
	}
	/* Map the output error onto an ICMP type/code for the sender. */
	switch (error) {
	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;
		/* Report the next-hop MTU so the sender can adjust. */
		if (rt != NULL) {
			u_int rtmtu;

			rtmtu = atomic_load_int(&rt->rt_mtu);
			if (rtmtu != 0) {
				destmtu = rtmtu;
			} else {
				struct ifnet *destifp;

				destifp = if_get(rt->rt_ifidx);
				if (destifp != NULL)
					destmtu = destifp->if_mtu;
				if_put(destifp);
			}
		}
		ipstat_inc(ips_cantfrag);
		if (destmtu == 0)
			goto done;
		break;

	case EACCES:
		/*
		 * pf(4) blocked the packet. There is no need to send an ICMP
		 * packet back since pf(4) takes care of it.
		 */
		goto done;

	case ENOBUFS:
		/*
		 * a router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * source quench could be a big problem under DoS attacks,
		 * or the underlying interface is rate-limited.
		 */
		goto done;

	case ENETUNREACH:		/* shouldn't happen, checked above */
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;
	}

	/* Rebuild a small mbuf from the stack copy to carry the ICMP error. */
	mcopy = m_gethdr(M_DONTWAIT, MT_DATA);
	if (mcopy == NULL)
		goto done;
	mcopy->m_len = mcopy->m_pkthdr.len = icmp_len;
	mcopy->m_flags |= (mflags & M_COPYFLAGS);
	mcopy->m_pkthdr.ph_rtableid = rtableid;
	mcopy->m_pkthdr.ph_ifidx = ifp->if_index;
	mcopy->m_pkthdr.ph_loopcnt = loopcnt;
	mcopy->m_pkthdr.pf.flags |= (pfflags & PF_TAG_GENERATED);
	memcpy(mcopy->m_data, icmp_buf, icmp_len);
	icmp_error(mcopy, type, code, dest, destmtu);

done:
	if (ro == &iproute)
		rtfree(ro->ro_rt);
}

/*
 * Handle the net.inet.ip sysctl subtree.
 */
int
ip_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
#ifdef MROUTING
	extern struct mrtstat mrtstat;
#endif
	int oldval, error;

	/* Almost all sysctl names at this level are terminal.
 */
	if (namelen != 1 && name[0] != IPCTL_IFQUEUE &&
	    name[0] != IPCTL_ARPQUEUE)
		return (ENOTDIR);

	switch (name[0]) {
	case IPCTL_SOURCEROUTE:
		NET_LOCK();
		error = sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
		    &ip_dosourceroute);
		NET_UNLOCK();
		return (error);
	case IPCTL_MTUDISC:
		NET_LOCK();
		error = sysctl_int(oldp, oldlenp, newp, newlen, &ip_mtudisc);
		/* Turning path-MTU discovery off discards learned MTUs. */
		if (ip_mtudisc == 0)
			rt_timer_queue_flush(&ip_mtudisc_timeout_q);
		NET_UNLOCK();
		return error;
	case IPCTL_MTUDISCTIMEOUT:
		NET_LOCK();
		error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &ip_mtudisc_timeout, 0, INT_MAX);
		rt_timer_queue_change(&ip_mtudisc_timeout_q,
		    ip_mtudisc_timeout);
		NET_UNLOCK();
		return (error);
#ifdef IPSEC
	case IPCTL_ENCDEBUG:
	case IPCTL_IPSEC_STATS:
	case IPCTL_IPSEC_EXPIRE_ACQUIRE:
	case IPCTL_IPSEC_EMBRYONIC_SA_TIMEOUT:
	case IPCTL_IPSEC_REQUIRE_PFS:
	case IPCTL_IPSEC_SOFT_ALLOCATIONS:
	case IPCTL_IPSEC_ALLOCATIONS:
	case IPCTL_IPSEC_SOFT_BYTES:
	case IPCTL_IPSEC_BYTES:
	case IPCTL_IPSEC_TIMEOUT:
	case IPCTL_IPSEC_SOFT_TIMEOUT:
	case IPCTL_IPSEC_SOFT_FIRSTUSE:
	case IPCTL_IPSEC_FIRSTUSE:
	case IPCTL_IPSEC_ENC_ALGORITHM:
	case IPCTL_IPSEC_AUTH_ALGORITHM:
	case IPCTL_IPSEC_IPCOMP_ALGORITHM:
		/* Delegate all IPsec knobs to the IPsec stack. */
		return (ipsec_sysctl(name, namelen, oldp, oldlenp, newp,
		    newlen));
#endif
	case IPCTL_IFQUEUE:
		return (sysctl_niq(name + 1, namelen - 1,
		    oldp, oldlenp, newp, newlen, &ipintrq));
	case IPCTL_ARPQUEUE:
		return (sysctl_niq(name + 1, namelen - 1,
		    oldp, oldlenp, newp, newlen, &arpinq));
	case IPCTL_ARPQUEUED:
		return (sysctl_rdint(oldp, oldlenp, newp,
		    atomic_load_int(&la_hold_total)));
	case IPCTL_STATS:
		return (ip_sysctl_ipstat(oldp, oldlenp, newp));
#ifdef MROUTING
	case IPCTL_MRTSTATS:
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &mrtstat, sizeof(mrtstat)));
	case IPCTL_MRTMFC:
		if (newp)
			return (EPERM);
		NET_LOCK();
		error = mrt_sysctl_mfc(oldp, oldlenp);
		NET_UNLOCK();
		return (error);
	case IPCTL_MRTVIF:
		if (newp)
			return (EPERM);
		NET_LOCK();
		error = mrt_sysctl_vif(oldp, oldlenp);
		NET_UNLOCK();
		return (error);
#else
	case IPCTL_MRTPROTO:
	case IPCTL_MRTSTATS:
	case IPCTL_MRTMFC:
	case IPCTL_MRTVIF:
		return (EOPNOTSUPP);
#endif
	case IPCTL_MULTIPATH:
		NET_LOCK();
		oldval = ipmultipath;
		error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &ipmultipath, 0, 1);
		/* Bump the route generation so cached routes re-resolve. */
		if (oldval != ipmultipath)
			atomic_inc_long(&rtgeneration);
		NET_UNLOCK();
		return (error);
	case IPCTL_FORWARDING:
	case IPCTL_SENDREDIRECTS:
	case IPCTL_DIRECTEDBCAST:
		/* These are atomically accessed; no net lock needed. */
		return (sysctl_bounded_arr(
		    ipctl_vars_unlocked, nitems(ipctl_vars_unlocked),
		    name, namelen, oldp, oldlenp, newp, newlen));
	default:
		NET_LOCK();
		error = sysctl_bounded_arr(ipctl_vars, nitems(ipctl_vars),
		    name, namelen, oldp, oldlenp, newp, newlen);
		NET_UNLOCK();
		return (error);
	}
	/* NOTREACHED */
}

/*
 * Export the IP statistics counters as a struct ipstat,
 * converting the per-CPU 64-bit counters to u_long words.
 */
int
ip_sysctl_ipstat(void *oldp, size_t *oldlenp, void *newp)
{
	uint64_t counters[ips_ncounters];
	struct ipstat ipstat;
	u_long *words = (u_long *)&ipstat;
	int i;

	/* struct layout must match the counter array word for word */
	CTASSERT(sizeof(ipstat) == (nitems(counters) * sizeof(u_long)));
	memset(&ipstat, 0, sizeof ipstat);
	counters_read(ipcounters, counters, nitems(counters), NULL);

	for (i = 0; i < nitems(counters); i++)
		words[i] = (u_long)counters[i];

	return (sysctl_rdstruct(oldp, oldlenp, newp, &ipstat, sizeof(ipstat)));
}

/*
 * Append control-message mbufs (timestamp, destination address, etc.)
 * to *mp according to the socket options set on inp.
 */
void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
		struct timeval tv;

		m_microtime(m, &tv);
		*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}

	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* this code is broken and will probably never be fixed. */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct sockaddr_dl sdl;
		struct ifnet *ifp;

		ifp = if_get(m->m_pkthdr.ph_ifidx);
		if (ifp == NULL || ifp->if_sadl == NULL) {
			/* Interface gone or nameless: report a stub sockaddr. */
			memset(&sdl, 0, sizeof(sdl));
			sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl.sdl_family = AF_LINK;
			sdl.sdl_index = ifp != NULL ? ifp->if_index : 0;
			sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0;
			*mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len,
			    IP_RECVIF, IPPROTO_IP);
		} else {
			*mp = sbcreatecontrol((caddr_t) ifp->if_sadl,
			    ifp->if_sadl->sdl_len, IP_RECVIF, IPPROTO_IP);
		}
		if (*mp)
			mp = &(*mp)->m_next;
		if_put(ifp);
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
		    sizeof(u_int8_t), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVRTABLE) {
		u_int rtableid = inp->inp_rtableid;

#if NPF > 0
		/* A pf-diverted packet reports the divert rdomain instead. */
		if (m && m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			struct pf_divert *divert;

			divert = pf_find_divert(m);
			KASSERT(divert != NULL);
			rtableid = divert->rdomain;
		}
#endif

		*mp = sbcreatecontrol((caddr_t) &rtableid,
		    sizeof(u_int), IP_RECVRTABLE, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * Drain a send queue and pass each packet to ip_output() with the
 * given output flags, under the shared net lock.
 */
void
ip_send_do_dispatch(void *xmq, int flags)
{
	struct mbuf_queue *mq = xmq;
	struct mbuf *m;
	struct mbuf_list ml;
	struct m_tag *mtag;

	mq_delist(mq, &ml);
	if (ml_empty(&ml))
		return;

	NET_LOCK_SHARED();
	while ((m = ml_dequeue(&ml)) != NULL) {
		u_int32_t ipsecflowinfo = 0;

		/* Pick up and strip any IPsec flow info attached as a tag. */
		if ((mtag = m_tag_find(m, PACKET_TAG_IPSEC_FLOWINFO, NULL))
		    != NULL) {
			ipsecflowinfo = *(u_int32_t *)(mtag + 1);
			m_tag_delete(m, mtag);
		}
		ip_output(m, NULL, NULL, flags, NULL, NULL, ipsecflowinfo);
	}
	NET_UNLOCK_SHARED();
}

/* Task callback: flush the raw-output send queue. */
void
ip_sendraw_dispatch(void *xmq)
{
	ip_send_do_dispatch(xmq, IP_RAWOUTPUT);
}

/* Task callback: flush the normal send queue. */
void
ip_send_dispatch(void *xmq)
{
	ip_send_do_dispatch(xmq, 0);
}

/* Queue a packet for asynchronous transmission via ip_output(). */
void
ip_send(struct mbuf *m)
{
	mq_enqueue(&ipsend_mq, m);
	task_add(net_tq(0), &ipsend_task);
}

/* Queue a packet for asynchronous raw (IP_RAWOUTPUT) transmission. */
void
ip_send_raw(struct mbuf *m)
{
	mq_enqueue(&ipsendraw_mq, m);
	task_add(net_tq(0), &ipsendraw_task);
}