1 /* $OpenBSD: ip_input.c,v 1.279 2016/07/22 07:39:06 mpi Exp $ */ 2 /* $NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1988, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 33 */ 34 35 #include "pf.h" 36 #include "carp.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/mbuf.h> 41 #include <sys/domain.h> 42 #include <sys/protosw.h> 43 #include <sys/socket.h> 44 #include <sys/socketvar.h> 45 #include <sys/sysctl.h> 46 #include <sys/pool.h> 47 #include <sys/task.h> 48 49 #include <net/if.h> 50 #include <net/if_var.h> 51 #include <net/if_dl.h> 52 #include <net/route.h> 53 #include <net/netisr.h> 54 55 #include <netinet/in.h> 56 #include <netinet/in_systm.h> 57 #include <netinet/if_ether.h> 58 #include <netinet/ip.h> 59 #include <netinet/in_pcb.h> 60 #include <netinet/in_var.h> 61 #include <netinet/ip_var.h> 62 #include <netinet/ip_icmp.h> 63 64 #if NPF > 0 65 #include <net/pfvar.h> 66 #endif 67 68 #ifdef MROUTING 69 #include <netinet/ip_mroute.h> 70 #endif 71 72 #ifdef IPSEC 73 #include <netinet/ip_ipsp.h> 74 #endif /* IPSEC */ 75 76 #if NCARP > 0 77 #include <net/if_types.h> 78 #include <netinet/ip_carp.h> 79 #endif 80 81 struct ipqhead ipq; 82 83 int encdebug = 0; 84 int ipsec_keep_invalid = IPSEC_DEFAULT_EMBRYONIC_SA_TIMEOUT; 85 int ipsec_require_pfs = IPSEC_DEFAULT_PFS; 86 int ipsec_soft_allocations = IPSEC_DEFAULT_SOFT_ALLOCATIONS; 87 int ipsec_exp_allocations = IPSEC_DEFAULT_EXP_ALLOCATIONS; 88 int ipsec_soft_bytes = IPSEC_DEFAULT_SOFT_BYTES; 89 int ipsec_exp_bytes = IPSEC_DEFAULT_EXP_BYTES; 90 int ipsec_soft_timeout = IPSEC_DEFAULT_SOFT_TIMEOUT; 91 int ipsec_exp_timeout = IPSEC_DEFAULT_EXP_TIMEOUT; 92 int ipsec_soft_first_use = IPSEC_DEFAULT_SOFT_FIRST_USE; 93 int ipsec_exp_first_use = IPSEC_DEFAULT_EXP_FIRST_USE; 94 int ipsec_expire_acquire = IPSEC_DEFAULT_EXPIRE_ACQUIRE; 95 char ipsec_def_enc[20]; 96 char ipsec_def_auth[20]; 97 char 
ipsec_def_comp[20]; 98 99 /* values controllable via sysctl */ 100 int ipforwarding = 0; 101 int ipmforwarding = 0; 102 int ipmultipath = 0; 103 int ipsendredirects = 1; 104 int ip_dosourceroute = 0; 105 int ip_defttl = IPDEFTTL; 106 int ip_mtudisc = 1; 107 u_int ip_mtudisc_timeout = IPMTUDISCTIMEOUT; 108 int ip_directedbcast = 0; 109 110 struct rttimer_queue *ip_mtudisc_timeout_q = NULL; 111 112 /* Keep track of memory used for reassembly */ 113 int ip_maxqueue = 300; 114 int ip_frags = 0; 115 116 int *ipctl_vars[IPCTL_MAXID] = IPCTL_VARS; 117 118 struct niqueue ipintrq = NIQUEUE_INITIALIZER(IFQ_MAXLEN, NETISR_IP); 119 120 struct pool ipqent_pool; 121 struct pool ipq_pool; 122 123 struct ipstat ipstat; 124 125 static struct mbuf_queue ipsend_mq; 126 127 void ip_ours(struct mbuf *); 128 int ip_dooptions(struct mbuf *, struct ifnet *); 129 int in_ouraddr(struct mbuf *, struct ifnet *, struct rtentry **); 130 void ip_forward(struct mbuf *, struct ifnet *, struct rtentry *, int); 131 #ifdef IPSEC 132 int ip_input_ipsec_fwd_check(struct mbuf *, int); 133 int ip_input_ipsec_ours_check(struct mbuf *, int); 134 #endif /* IPSEC */ 135 136 static void ip_send_dispatch(void *); 137 static struct task ipsend_task = TASK_INITIALIZER(ip_send_dispatch, &ipsend_mq); 138 /* 139 * Used to save the IP options in case a protocol wants to respond 140 * to an incoming packet over the same route if the packet got here 141 * using IP source routing. This allows connection establishment and 142 * maintenance when the remote end is on a network that is not known 143 * to us. 144 */ 145 struct ip_srcrt { 146 int isr_nhops; /* number of hops */ 147 struct in_addr isr_dst; /* final destination */ 148 char isr_nop; /* one NOP to align */ 149 char isr_hdr[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN & OFFSET */ 150 struct in_addr isr_routes[MAX_IPOPTLEN/sizeof(struct in_addr)]; 151 }; 152 153 void save_rte(struct mbuf *, u_char *, struct in_addr); 154 155 /* 156 * IP initialization: fill in IP protocol switch table. 157 * All protocols not implemented in kernel go to raw IP protocol handler. 158 */ 159 void 160 ip_init(void) 161 { 162 struct protosw *pr; 163 int i; 164 const u_int16_t defbaddynamicports_tcp[] = DEFBADDYNAMICPORTS_TCP; 165 const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP; 166 const u_int16_t defrootonlyports_tcp[] = DEFROOTONLYPORTS_TCP; 167 const u_int16_t defrootonlyports_udp[] = DEFROOTONLYPORTS_UDP; 168 169 pool_init(&ipqent_pool, sizeof(struct ipqent), 0, 0, 0, "ipqe", NULL); 170 pool_init(&ipq_pool, sizeof(struct ipq), 0, 0, 0, "ipq", NULL); 171 172 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 173 if (pr == NULL) 174 panic("ip_init"); 175 for (i = 0; i < IPPROTO_MAX; i++) 176 ip_protox[i] = pr - inetsw; 177 for (pr = inetdomain.dom_protosw; 178 pr < inetdomain.dom_protoswNPROTOSW; pr++) 179 if (pr->pr_domain->dom_family == PF_INET && 180 pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW && 181 pr->pr_protocol < IPPROTO_MAX) 182 ip_protox[pr->pr_protocol] = pr - inetsw; 183 LIST_INIT(&ipq); 184 if (ip_mtudisc != 0) 185 ip_mtudisc_timeout_q = 186 rt_timer_queue_create(ip_mtudisc_timeout); 187 188 /* Fill in list of ports not to allocate dynamically. */ 189 memset(&baddynamicports, 0, sizeof(baddynamicports)); 190 for (i = 0; defbaddynamicports_tcp[i] != 0; i++) 191 DP_SET(baddynamicports.tcp, defbaddynamicports_tcp[i]); 192 for (i = 0; defbaddynamicports_udp[i] != 0; i++) 193 DP_SET(baddynamicports.udp, defbaddynamicports_udp[i]); 194 195 /* Fill in list of ports only root can bind to. 
*/ 196 memset(&rootonlyports, 0, sizeof(rootonlyports)); 197 for (i = 0; defrootonlyports_tcp[i] != 0; i++) 198 DP_SET(rootonlyports.tcp, defrootonlyports_tcp[i]); 199 for (i = 0; defrootonlyports_udp[i] != 0; i++) 200 DP_SET(rootonlyports.udp, defrootonlyports_udp[i]); 201 202 strlcpy(ipsec_def_enc, IPSEC_DEFAULT_DEF_ENC, sizeof(ipsec_def_enc)); 203 strlcpy(ipsec_def_auth, IPSEC_DEFAULT_DEF_AUTH, sizeof(ipsec_def_auth)); 204 strlcpy(ipsec_def_comp, IPSEC_DEFAULT_DEF_COMP, sizeof(ipsec_def_comp)); 205 206 mq_init(&ipsend_mq, 64, IPL_SOFTNET); 207 } 208 209 void 210 ipintr(void) 211 { 212 struct mbuf *m; 213 214 /* 215 * Get next datagram off input queue and get IP header 216 * in first mbuf. 217 */ 218 while ((m = niq_dequeue(&ipintrq)) != NULL) { 219 #ifdef DIAGNOSTIC 220 if ((m->m_flags & M_PKTHDR) == 0) 221 panic("ipintr no HDR"); 222 #endif 223 ipv4_input(m); 224 } 225 } 226 227 /* 228 * IPv4 input routine. 229 * 230 * Checksum and byte swap header. Process options. Forward or deliver. 231 */ 232 void 233 ipv4_input(struct mbuf *m) 234 { 235 struct ifnet *ifp; 236 struct rtentry *rt = NULL; 237 struct ip *ip; 238 int hlen, len; 239 #if defined(MROUTING) || defined(IPSEC) 240 int rv; 241 #endif 242 in_addr_t pfrdr = 0; 243 244 ifp = if_get(m->m_pkthdr.ph_ifidx); 245 if (ifp == NULL) 246 goto bad; 247 248 ipstat.ips_total++; 249 if (m->m_len < sizeof (struct ip) && 250 (m = m_pullup(m, sizeof (struct ip))) == NULL) { 251 ipstat.ips_toosmall++; 252 goto out; 253 } 254 ip = mtod(m, struct ip *); 255 if (ip->ip_v != IPVERSION) { 256 ipstat.ips_badvers++; 257 goto bad; 258 } 259 hlen = ip->ip_hl << 2; 260 if (hlen < sizeof(struct ip)) { /* minimum header length */ 261 ipstat.ips_badhlen++; 262 goto bad; 263 } 264 if (hlen > m->m_len) { 265 if ((m = m_pullup(m, hlen)) == NULL) { 266 ipstat.ips_badhlen++; 267 goto out; 268 } 269 ip = mtod(m, struct ip *); 270 } 271 272 /* 127/8 must not appear on wire - RFC1122 */ 273 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET || 274 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) { 275 if ((ifp->if_flags & IFF_LOOPBACK) == 0) { 276 ipstat.ips_badaddr++; 277 goto bad; 278 } 279 } 280 281 if ((m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_OK) == 0) { 282 if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_BAD) { 283 ipstat.ips_badsum++; 284 goto bad; 285 } 286 287 ipstat.ips_inswcsum++; 288 if (in_cksum(m, hlen) != 0) { 289 ipstat.ips_badsum++; 290 goto bad; 291 } 292 } 293 294 /* Retrieve the packet length. */ 295 len = ntohs(ip->ip_len); 296 297 /* 298 * Convert fields to host representation. 299 */ 300 if (len < hlen) { 301 ipstat.ips_badlen++; 302 goto bad; 303 } 304 305 /* 306 * Check that the amount of data in the buffers 307 * is at least as much as the IP header would have us expect. 308 * Trim mbufs if longer than we expect. 309 * Drop packet if shorter than we expect. 
310 */ 311 if (m->m_pkthdr.len < len) { 312 ipstat.ips_tooshort++; 313 goto bad; 314 } 315 if (m->m_pkthdr.len > len) { 316 if (m->m_len == m->m_pkthdr.len) { 317 m->m_len = len; 318 m->m_pkthdr.len = len; 319 } else 320 m_adj(m, len - m->m_pkthdr.len); 321 } 322 323 #if NCARP > 0 324 if (ifp->if_type == IFT_CARP && ip->ip_p != IPPROTO_ICMP && 325 carp_lsdrop(m, AF_INET, &ip->ip_src.s_addr, &ip->ip_dst.s_addr)) 326 goto bad; 327 #endif 328 329 #if NPF > 0 330 /* 331 * Packet filter 332 */ 333 pfrdr = ip->ip_dst.s_addr; 334 if (pf_test(AF_INET, PF_IN, ifp, &m) != PF_PASS) 335 goto bad; 336 if (m == NULL) 337 goto out; 338 339 ip = mtod(m, struct ip *); 340 hlen = ip->ip_hl << 2; 341 pfrdr = (pfrdr != ip->ip_dst.s_addr); 342 #endif 343 344 /* 345 * Process options and, if not destined for us, 346 * ship it on. ip_dooptions returns 1 when an 347 * error was detected (causing an icmp message 348 * to be sent and the original packet to be freed). 349 */ 350 if (hlen > sizeof (struct ip) && ip_dooptions(m, ifp)) { 351 goto out; 352 } 353 354 if (in_ouraddr(m, ifp, &rt)) { 355 ip_ours(m); 356 goto out; 357 } 358 359 if (IN_MULTICAST(ip->ip_dst.s_addr)) { 360 /* 361 * Make sure M_MCAST is set. It should theoretically 362 * already be there, but let's play safe because upper 363 * layers check for this flag. 364 */ 365 m->m_flags |= M_MCAST; 366 367 #ifdef MROUTING 368 if (ipmforwarding && ip_mrouter) { 369 if (m->m_flags & M_EXT) { 370 if ((m = m_pullup(m, hlen)) == NULL) { 371 ipstat.ips_toosmall++; 372 goto out; 373 } 374 ip = mtod(m, struct ip *); 375 } 376 /* 377 * If we are acting as a multicast router, all 378 * incoming multicast packets are passed to the 379 * kernel-level multicast forwarding function. 380 * The packet is returned (relatively) intact; if 381 * ip_mforward() returns a non-zero value, the packet 382 * must be discarded, else it may be accepted below. 383 * 384 * (The IP ident field is put in the same byte order 385 * as expected when ip_mforward() is called from 386 * ip_output().) 387 */ 388 KERNEL_LOCK(); 389 rv = ip_mforward(m, ifp); 390 KERNEL_UNLOCK(); 391 if (rv != 0) { 392 ipstat.ips_cantforward++; 393 goto bad; 394 } 395 396 /* 397 * The process-level routing daemon needs to receive 398 * all multicast IGMP packets, whether or not this 399 * host belongs to their destination groups. 400 */ 401 if (ip->ip_p == IPPROTO_IGMP) { 402 ip_ours(m); 403 goto out; 404 } 405 ipstat.ips_forward++; 406 } 407 #endif 408 /* 409 * See if we belong to the destination multicast group on the 410 * arrival interface. 411 */ 412 if (!in_hasmulti(&ip->ip_dst, ifp)) { 413 ipstat.ips_notmember++; 414 if (!IN_LOCAL_GROUP(ip->ip_dst.s_addr)) 415 ipstat.ips_cantforward++; 416 goto bad; 417 } 418 ip_ours(m); 419 goto out; 420 } 421 422 if (ip->ip_dst.s_addr == INADDR_BROADCAST || 423 ip->ip_dst.s_addr == INADDR_ANY) { 424 ip_ours(m); 425 goto out; 426 } 427 428 #if NCARP > 0 429 if (ifp->if_type == IFT_CARP && ip->ip_p == IPPROTO_ICMP && 430 carp_lsdrop(m, AF_INET, &ip->ip_src.s_addr, &ip->ip_dst.s_addr)) 431 goto bad; 432 #endif 433 /* 434 * Not for us; forward if possible and desirable. 435 */ 436 if (ipforwarding == 0) { 437 ipstat.ips_cantforward++; 438 goto bad; 439 } 440 #ifdef IPSEC 441 if (ipsec_in_use) { 442 KERNEL_LOCK(); 443 rv = ip_input_ipsec_fwd_check(m, hlen); 444 KERNEL_UNLOCK(); 445 if (rv != 0) { 446 ipstat.ips_cantforward++; 447 goto bad; 448 } 449 /* 450 * Fall through, forward packet. Outbound IPsec policy 451 * checking will occur in ip_output(). 
452 */ 453 } 454 #endif /* IPSEC */ 455 456 ip_forward(m, ifp, rt, pfrdr); 457 if_put(ifp); 458 return; 459 bad: 460 m_freem(m); 461 out: 462 rtfree(rt); 463 if_put(ifp); 464 } 465 466 /* 467 * IPv4 local-delivery routine. 468 * 469 * If fragmented try to reassemble. Pass to next level. 470 */ 471 void 472 ip_ours(struct mbuf *m) 473 { 474 struct ip *ip = mtod(m, struct ip *); 475 struct ipq *fp; 476 struct ipqent *ipqe; 477 int mff, hlen; 478 479 hlen = ip->ip_hl << 2; 480 481 /* pf might have modified stuff, might have to chksum */ 482 in_proto_cksum_out(m, NULL); 483 484 /* 485 * If offset or IP_MF are set, must reassemble. 486 * Otherwise, nothing need be done. 487 * (We could look in the reassembly queue to see 488 * if the packet was previously fragmented, 489 * but it's not worth the time; just let them time out.) 490 */ 491 if (ip->ip_off &~ htons(IP_DF | IP_RF)) { 492 if (m->m_flags & M_EXT) { /* XXX */ 493 if ((m = m_pullup(m, hlen)) == NULL) { 494 ipstat.ips_toosmall++; 495 return; 496 } 497 ip = mtod(m, struct ip *); 498 } 499 500 /* 501 * Look for queue of fragments 502 * of this datagram. 503 */ 504 LIST_FOREACH(fp, &ipq, ipq_q) 505 if (ip->ip_id == fp->ipq_id && 506 ip->ip_src.s_addr == fp->ipq_src.s_addr && 507 ip->ip_dst.s_addr == fp->ipq_dst.s_addr && 508 ip->ip_p == fp->ipq_p) 509 goto found; 510 fp = 0; 511 found: 512 513 /* 514 * Adjust ip_len to not reflect header, 515 * set ipqe_mff if more fragments are expected, 516 * convert offset of this to bytes. 517 */ 518 ip->ip_len = htons(ntohs(ip->ip_len) - hlen); 519 mff = (ip->ip_off & htons(IP_MF)) != 0; 520 if (mff) { 521 /* 522 * Make sure that fragments have a data length 523 * that's a non-zero multiple of 8 bytes. 524 */ 525 if (ntohs(ip->ip_len) == 0 || 526 (ntohs(ip->ip_len) & 0x7) != 0) { 527 ipstat.ips_badfrags++; 528 goto bad; 529 } 530 } 531 ip->ip_off = htons(ntohs(ip->ip_off) << 3); 532 533 /* 534 * If datagram marked as having more fragments 535 * or if this is not the first fragment, 536 * attempt reassembly; if it succeeds, proceed. 537 */ 538 if (mff || ip->ip_off) { 539 ipstat.ips_fragments++; 540 if (ip_frags + 1 > ip_maxqueue) { 541 ip_flush(); 542 ipstat.ips_rcvmemdrop++; 543 goto bad; 544 } 545 546 ipqe = pool_get(&ipqent_pool, PR_NOWAIT); 547 if (ipqe == NULL) { 548 ipstat.ips_rcvmemdrop++; 549 goto bad; 550 } 551 ip_frags++; 552 ipqe->ipqe_mff = mff; 553 ipqe->ipqe_m = m; 554 ipqe->ipqe_ip = ip; 555 m = ip_reass(ipqe, fp); 556 if (m == NULL) { 557 return; 558 } 559 ipstat.ips_reassembled++; 560 ip = mtod(m, struct ip *); 561 hlen = ip->ip_hl << 2; 562 ip->ip_len = htons(ntohs(ip->ip_len) + hlen); 563 } else 564 if (fp) 565 ip_freef(fp); 566 } 567 568 #ifdef IPSEC 569 if (ipsec_in_use) { 570 if (ip_input_ipsec_ours_check(m, hlen) != 0) { 571 ipstat.ips_cantforward++; 572 goto bad; 573 } 574 } 575 /* Otherwise, just fall through and deliver the packet */ 576 #endif /* IPSEC */ 577 578 /* 579 * Switch out to protocol's input routine. 
580 */ 581 ipstat.ips_delivered++; 582 (*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen, NULL, 0); 583 return; 584 bad: 585 m_freem(m); 586 } 587 588 int 589 in_ouraddr(struct mbuf *m, struct ifnet *ifp, struct rtentry **prt) 590 { 591 struct rtentry *rt; 592 struct ip *ip; 593 struct sockaddr_in sin; 594 int match = 0; 595 596 #if NPF > 0 597 switch (pf_ouraddr(m)) { 598 case 0: 599 return (0); 600 case 1: 601 return (1); 602 default: 603 /* pf does not know it */ 604 break; 605 } 606 #endif 607 608 ip = mtod(m, struct ip *); 609 610 memset(&sin, 0, sizeof(sin)); 611 sin.sin_len = sizeof(sin); 612 sin.sin_family = AF_INET; 613 sin.sin_addr = ip->ip_dst; 614 rt = rtalloc_mpath(sintosa(&sin), &ip->ip_src.s_addr, 615 m->m_pkthdr.ph_rtableid); 616 if (rtisvalid(rt)) { 617 if (ISSET(rt->rt_flags, RTF_LOCAL)) 618 match = 1; 619 620 /* 621 * If directedbcast is enabled we only consider it local 622 * if it is received on the interface with that address. 623 */ 624 if (ISSET(rt->rt_flags, RTF_BROADCAST) && 625 (!ip_directedbcast || rt->rt_ifidx == ifp->if_index)) { 626 match = 1; 627 628 /* Make sure M_BCAST is set */ 629 m->m_flags |= M_BCAST; 630 } 631 } 632 *prt = rt; 633 634 if (!match) { 635 struct ifaddr *ifa; 636 637 /* 638 * No local address or broadcast address found, so check for 639 * ancient classful broadcast addresses. 640 * It must have been broadcast on the link layer, and for an 641 * address on the interface it was received on. 642 */ 643 if (!ISSET(m->m_flags, M_BCAST) || 644 !IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, ip->ip_dst.s_addr)) 645 return (0); 646 647 if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid)) 648 return (0); 649 /* 650 * The check in the loop assumes you only rx a packet on an UP 651 * interface, and that M_BCAST will only be set on a BROADCAST 652 * interface. 653 */ 654 KERNEL_LOCK(); 655 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) { 656 if (ifa->ifa_addr->sa_family != AF_INET) 657 continue; 658 659 if (IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, 660 ifatoia(ifa)->ia_addr.sin_addr.s_addr)) { 661 match = 1; 662 break; 663 } 664 } 665 KERNEL_UNLOCK(); 666 } 667 668 return (match); 669 } 670 671 #ifdef IPSEC 672 int 673 ip_input_ipsec_fwd_check(struct mbuf *m, int hlen) 674 { 675 struct tdb *tdb; 676 struct tdb_ident *tdbi; 677 struct m_tag *mtag; 678 int error = 0; 679 680 /* 681 * IPsec policy check for forwarded packets. Look at 682 * inner-most IPsec SA used. 683 */ 684 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); 685 if (mtag != NULL) { 686 tdbi = (struct tdb_ident *)(mtag + 1); 687 tdb = gettdb(tdbi->rdomain, tdbi->spi, &tdbi->dst, tdbi->proto); 688 } else 689 tdb = NULL; 690 ipsp_spd_lookup(m, AF_INET, hlen, &error, IPSP_DIRECTION_IN, tdb, NULL, 691 0); 692 693 return error; 694 } 695 696 int 697 ip_input_ipsec_ours_check(struct mbuf *m, int hlen) 698 { 699 struct ip *ip = mtod(m, struct ip *); 700 struct tdb *tdb; 701 struct tdb_ident *tdbi; 702 struct m_tag *mtag; 703 int error = 0; 704 705 /* 706 * If it's a protected packet for us, skip the policy check. 707 * That's because we really only care about the properties of 708 * the protected packet, and not the intermediate versions. 709 * While this is not the most paranoid setting, it allows 710 * some flexibility in handling nested tunnels (in setting up 711 * the policies). 
712 */ 713 if ((ip->ip_p == IPPROTO_ESP) || (ip->ip_p == IPPROTO_AH) || 714 (ip->ip_p == IPPROTO_IPCOMP)) 715 return 0; 716 717 /* 718 * If the protected packet was tunneled, then we need to 719 * verify the protected packet's information, not the 720 * external headers. Thus, skip the policy lookup for the 721 * external packet, and keep the IPsec information linked on 722 * the packet header (the encapsulation routines know how 723 * to deal with that). 724 */ 725 if ((ip->ip_p == IPPROTO_IPIP) || (ip->ip_p == IPPROTO_IPV6)) 726 return 0; 727 728 /* 729 * If the protected packet is TCP or UDP, we'll do the 730 * policy check in the respective input routine, so we can 731 * check for bypass sockets. 732 */ 733 if ((ip->ip_p == IPPROTO_TCP) || (ip->ip_p == IPPROTO_UDP)) 734 return 0; 735 736 /* 737 * IPsec policy check for local-delivery packets. Look at the 738 * inner-most SA that protected the packet. This is in fact 739 * a bit too restrictive (it could end up causing packets to 740 * be dropped that semantically follow the policy, e.g., in 741 * certain SA-bundle configurations); but the alternative is 742 * very complicated (and requires keeping track of what 743 * kinds of tunneling headers have been seen in-between the 744 * IPsec headers), and I don't think we lose much functionality 745 * that's needed in the real world (who uses bundles anyway ?). 746 */ 747 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); 748 if (mtag) { 749 tdbi = (struct tdb_ident *)(mtag + 1); 750 tdb = gettdb(tdbi->rdomain, tdbi->spi, &tdbi->dst, 751 tdbi->proto); 752 } else 753 tdb = NULL; 754 ipsp_spd_lookup(m, AF_INET, hlen, &error, IPSP_DIRECTION_IN, 755 tdb, NULL, 0); 756 757 return error; 758 } 759 #endif /* IPSEC */ 760 761 /* 762 * Take incoming datagram fragment and try to 763 * reassemble it into whole datagram. If a chain for 764 * reassembly of this datagram already exists, then it 765 * is given as fp; otherwise have to make a chain. 766 */ 767 struct mbuf * 768 ip_reass(struct ipqent *ipqe, struct ipq *fp) 769 { 770 struct mbuf *m = ipqe->ipqe_m; 771 struct ipqent *nq, *p, *q; 772 struct ip *ip; 773 struct mbuf *t; 774 int hlen = ipqe->ipqe_ip->ip_hl << 2; 775 int i, next; 776 u_int8_t ecn, ecn0; 777 778 /* 779 * Presence of header sizes in mbufs 780 * would confuse code below. 781 */ 782 m->m_data += hlen; 783 m->m_len -= hlen; 784 785 /* 786 * If first fragment to arrive, create a reassembly queue. 787 */ 788 if (fp == NULL) { 789 fp = pool_get(&ipq_pool, PR_NOWAIT); 790 if (fp == NULL) 791 goto dropfrag; 792 LIST_INSERT_HEAD(&ipq, fp, ipq_q); 793 fp->ipq_ttl = IPFRAGTTL; 794 fp->ipq_p = ipqe->ipqe_ip->ip_p; 795 fp->ipq_id = ipqe->ipqe_ip->ip_id; 796 LIST_INIT(&fp->ipq_fragq); 797 fp->ipq_src = ipqe->ipqe_ip->ip_src; 798 fp->ipq_dst = ipqe->ipqe_ip->ip_dst; 799 p = NULL; 800 goto insert; 801 } 802 803 /* 804 * Handle ECN by comparing this segment with the first one; 805 * if CE is set, do not lose CE. 806 * drop if CE and not-ECT are mixed for the same packet. 807 */ 808 ecn = ipqe->ipqe_ip->ip_tos & IPTOS_ECN_MASK; 809 ecn0 = LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos & IPTOS_ECN_MASK; 810 if (ecn == IPTOS_ECN_CE) { 811 if (ecn0 == IPTOS_ECN_NOTECT) 812 goto dropfrag; 813 if (ecn0 != IPTOS_ECN_CE) 814 LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos |= IPTOS_ECN_CE; 815 } 816 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) 817 goto dropfrag; 818 819 /* 820 * Find a segment which begins after this one does. 
821 */ 822 for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL; 823 p = q, q = LIST_NEXT(q, ipqe_q)) 824 if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off)) 825 break; 826 827 /* 828 * If there is a preceding segment, it may provide some of 829 * our data already. If so, drop the data from the incoming 830 * segment. If it provides all of our data, drop us. 831 */ 832 if (p != NULL) { 833 i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) - 834 ntohs(ipqe->ipqe_ip->ip_off); 835 if (i > 0) { 836 if (i >= ntohs(ipqe->ipqe_ip->ip_len)) 837 goto dropfrag; 838 m_adj(ipqe->ipqe_m, i); 839 ipqe->ipqe_ip->ip_off = 840 htons(ntohs(ipqe->ipqe_ip->ip_off) + i); 841 ipqe->ipqe_ip->ip_len = 842 htons(ntohs(ipqe->ipqe_ip->ip_len) - i); 843 } 844 } 845 846 /* 847 * While we overlap succeeding segments trim them or, 848 * if they are completely covered, dequeue them. 849 */ 850 for (; q != NULL && 851 ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) > 852 ntohs(q->ipqe_ip->ip_off); q = nq) { 853 i = (ntohs(ipqe->ipqe_ip->ip_off) + 854 ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off); 855 if (i < ntohs(q->ipqe_ip->ip_len)) { 856 q->ipqe_ip->ip_len = 857 htons(ntohs(q->ipqe_ip->ip_len) - i); 858 q->ipqe_ip->ip_off = 859 htons(ntohs(q->ipqe_ip->ip_off) + i); 860 m_adj(q->ipqe_m, i); 861 break; 862 } 863 nq = LIST_NEXT(q, ipqe_q); 864 m_freem(q->ipqe_m); 865 LIST_REMOVE(q, ipqe_q); 866 pool_put(&ipqent_pool, q); 867 ip_frags--; 868 } 869 870 insert: 871 /* 872 * Stick new segment in its place; 873 * check for complete reassembly. 874 */ 875 if (p == NULL) { 876 LIST_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q); 877 } else { 878 LIST_INSERT_AFTER(p, ipqe, ipqe_q); 879 } 880 next = 0; 881 for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL; 882 p = q, q = LIST_NEXT(q, ipqe_q)) { 883 if (ntohs(q->ipqe_ip->ip_off) != next) 884 return (0); 885 next += ntohs(q->ipqe_ip->ip_len); 886 } 887 if (p->ipqe_mff) 888 return (0); 889 890 /* 891 * Reassembly is complete. Check for a bogus message size and 892 * concatenate fragments. 893 */ 894 q = LIST_FIRST(&fp->ipq_fragq); 895 ip = q->ipqe_ip; 896 if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) { 897 ipstat.ips_toolong++; 898 ip_freef(fp); 899 return (0); 900 } 901 m = q->ipqe_m; 902 t = m->m_next; 903 m->m_next = 0; 904 m_cat(m, t); 905 nq = LIST_NEXT(q, ipqe_q); 906 pool_put(&ipqent_pool, q); 907 ip_frags--; 908 for (q = nq; q != NULL; q = nq) { 909 t = q->ipqe_m; 910 nq = LIST_NEXT(q, ipqe_q); 911 pool_put(&ipqent_pool, q); 912 ip_frags--; 913 m_cat(m, t); 914 } 915 916 /* 917 * Create header for new ip packet by 918 * modifying header of first packet; 919 * dequeue and discard fragment reassembly header. 920 * Make header visible. 921 */ 922 ip->ip_len = htons(next); 923 ip->ip_src = fp->ipq_src; 924 ip->ip_dst = fp->ipq_dst; 925 LIST_REMOVE(fp, ipq_q); 926 pool_put(&ipq_pool, fp); 927 m->m_len += (ip->ip_hl << 2); 928 m->m_data -= (ip->ip_hl << 2); 929 /* some debugging cruft by sklower, below, will go away soon */ 930 if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */ 931 int plen = 0; 932 for (t = m; t; t = t->m_next) 933 plen += t->m_len; 934 m->m_pkthdr.len = plen; 935 } 936 return (m); 937 938 dropfrag: 939 ipstat.ips_fragdropped++; 940 m_freem(m); 941 pool_put(&ipqent_pool, ipqe); 942 ip_frags--; 943 return (0); 944 } 945 946 /* 947 * Free a fragment reassembly header and all 948 * associated datagrams. 
949 */ 950 void 951 ip_freef(struct ipq *fp) 952 { 953 struct ipqent *q, *p; 954 955 for (q = LIST_FIRST(&fp->ipq_fragq); q != NULL; q = p) { 956 p = LIST_NEXT(q, ipqe_q); 957 m_freem(q->ipqe_m); 958 LIST_REMOVE(q, ipqe_q); 959 pool_put(&ipqent_pool, q); 960 ip_frags--; 961 } 962 LIST_REMOVE(fp, ipq_q); 963 pool_put(&ipq_pool, fp); 964 } 965 966 /* 967 * IP timer processing; 968 * if a timer expires on a reassembly queue, discard it. 969 * clear the forwarding cache, there might be a better route. 970 */ 971 void 972 ip_slowtimo(void) 973 { 974 struct ipq *fp, *nfp; 975 int s = splsoftnet(); 976 977 for (fp = LIST_FIRST(&ipq); fp != NULL; fp = nfp) { 978 nfp = LIST_NEXT(fp, ipq_q); 979 if (--fp->ipq_ttl == 0) { 980 ipstat.ips_fragtimeout++; 981 ip_freef(fp); 982 } 983 } 984 splx(s); 985 } 986 987 /* 988 * Drain off all datagram fragments. 989 */ 990 void 991 ip_drain(void) 992 { 993 while (!LIST_EMPTY(&ipq)) { 994 ipstat.ips_fragdropped++; 995 ip_freef(LIST_FIRST(&ipq)); 996 } 997 } 998 999 /* 1000 * Flush a bunch of datagram fragments, till we are down to 75%. 1001 */ 1002 void 1003 ip_flush(void) 1004 { 1005 int max = 50; 1006 1007 /* ipq already locked */ 1008 while (!LIST_EMPTY(&ipq) && ip_frags > ip_maxqueue * 3 / 4 && --max) { 1009 ipstat.ips_fragdropped++; 1010 ip_freef(LIST_FIRST(&ipq)); 1011 } 1012 } 1013 1014 /* 1015 * Do option processing on a datagram, 1016 * possibly discarding it if bad options are encountered, 1017 * or forwarding it if source-routed. 1018 * Returns 1 if packet has been forwarded/freed, 1019 * 0 if the packet should be processed further. 1020 */ 1021 int 1022 ip_dooptions(struct mbuf *m, struct ifnet *ifp) 1023 { 1024 struct ip *ip = mtod(m, struct ip *); 1025 unsigned int rtableid = m->m_pkthdr.ph_rtableid; 1026 struct rtentry *rt; 1027 struct sockaddr_in ipaddr; 1028 u_char *cp; 1029 struct ip_timestamp ipt; 1030 struct in_ifaddr *ia; 1031 int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; 1032 struct in_addr sin, dst; 1033 u_int32_t ntime; 1034 1035 dst = ip->ip_dst; 1036 cp = (u_char *)(ip + 1); 1037 cnt = (ip->ip_hl << 2) - sizeof (struct ip); 1038 1039 KERNEL_LOCK(); 1040 for (; cnt > 0; cnt -= optlen, cp += optlen) { 1041 opt = cp[IPOPT_OPTVAL]; 1042 if (opt == IPOPT_EOL) 1043 break; 1044 if (opt == IPOPT_NOP) 1045 optlen = 1; 1046 else { 1047 if (cnt < IPOPT_OLEN + sizeof(*cp)) { 1048 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1049 goto bad; 1050 } 1051 optlen = cp[IPOPT_OLEN]; 1052 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { 1053 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1054 goto bad; 1055 } 1056 } 1057 1058 switch (opt) { 1059 1060 default: 1061 break; 1062 1063 /* 1064 * Source routing with record. 1065 * Find interface with current destination address. 1066 * If none on this machine then drop if strictly routed, 1067 * or do nothing if loosely routed. 1068 * Record interface address and bring up next address 1069 * component. If strictly routed make sure next 1070 * address is on directly accessible net. 
1071 */ 1072 case IPOPT_LSRR: 1073 case IPOPT_SSRR: 1074 if (!ip_dosourceroute) { 1075 type = ICMP_UNREACH; 1076 code = ICMP_UNREACH_SRCFAIL; 1077 goto bad; 1078 } 1079 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1080 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1081 goto bad; 1082 } 1083 memset(&ipaddr, 0, sizeof(ipaddr)); 1084 ipaddr.sin_family = AF_INET; 1085 ipaddr.sin_len = sizeof(ipaddr); 1086 ipaddr.sin_addr = ip->ip_dst; 1087 ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr), 1088 m->m_pkthdr.ph_rtableid)); 1089 if (ia == NULL) { 1090 if (opt == IPOPT_SSRR) { 1091 type = ICMP_UNREACH; 1092 code = ICMP_UNREACH_SRCFAIL; 1093 goto bad; 1094 } 1095 /* 1096 * Loose routing, and not at next destination 1097 * yet; nothing to do except forward. 1098 */ 1099 break; 1100 } 1101 off--; /* 0 origin */ 1102 if ((off + sizeof(struct in_addr)) > optlen) { 1103 /* 1104 * End of source route. Should be for us. 1105 */ 1106 save_rte(m, cp, ip->ip_src); 1107 break; 1108 } 1109 1110 /* 1111 * locate outgoing interface 1112 */ 1113 memset(&ipaddr, 0, sizeof(ipaddr)); 1114 ipaddr.sin_family = AF_INET; 1115 ipaddr.sin_len = sizeof(ipaddr); 1116 memcpy(&ipaddr.sin_addr, cp + off, 1117 sizeof(ipaddr.sin_addr)); 1118 if (opt == IPOPT_SSRR) { 1119 if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(&ipaddr), 1120 m->m_pkthdr.ph_rtableid))) == NULL) 1121 ia = ifatoia(ifa_ifwithnet(sintosa(&ipaddr), 1122 m->m_pkthdr.ph_rtableid)); 1123 if (ia == NULL) { 1124 type = ICMP_UNREACH; 1125 code = ICMP_UNREACH_SRCFAIL; 1126 goto bad; 1127 } 1128 memcpy(cp + off, &ia->ia_addr.sin_addr, 1129 sizeof(struct in_addr)); 1130 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1131 } else { 1132 /* keep packet in the virtual instance */ 1133 rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, 1134 rtableid); 1135 if (!rtisvalid(rt)) { 1136 type = ICMP_UNREACH; 1137 code = ICMP_UNREACH_SRCFAIL; 1138 rtfree(rt); 1139 goto bad; 1140 } 1141 ia = ifatoia(rt->rt_ifa); 1142 memcpy(cp + off, &ia->ia_addr.sin_addr, 1143 sizeof(struct in_addr)); 1144 rtfree(rt); 1145 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1146 } 1147 ip->ip_dst = ipaddr.sin_addr; 1148 /* 1149 * Let ip_intr's mcast routing check handle mcast pkts 1150 */ 1151 forward = !IN_MULTICAST(ip->ip_dst.s_addr); 1152 break; 1153 1154 case IPOPT_RR: 1155 if (optlen < IPOPT_OFFSET + sizeof(*cp)) { 1156 code = &cp[IPOPT_OLEN] - (u_char *)ip; 1157 goto bad; 1158 } 1159 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) { 1160 code = &cp[IPOPT_OFFSET] - (u_char *)ip; 1161 goto bad; 1162 } 1163 1164 /* 1165 * If no space remains, ignore. 1166 */ 1167 off--; /* 0 origin */ 1168 if ((off + sizeof(struct in_addr)) > optlen) 1169 break; 1170 memset(&ipaddr, 0, sizeof(ipaddr)); 1171 ipaddr.sin_family = AF_INET; 1172 ipaddr.sin_len = sizeof(ipaddr); 1173 ipaddr.sin_addr = ip->ip_dst; 1174 /* 1175 * locate outgoing interface; if we're the destination, 1176 * use the incoming interface (should be same). 1177 * Again keep the packet inside the virtual instance. 
1178 */ 1179 rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid); 1180 if (!rtisvalid(rt)) { 1181 type = ICMP_UNREACH; 1182 code = ICMP_UNREACH_HOST; 1183 rtfree(rt); 1184 goto bad; 1185 } 1186 ia = ifatoia(rt->rt_ifa); 1187 memcpy(cp + off, &ia->ia_addr.sin_addr, 1188 sizeof(struct in_addr)); 1189 rtfree(rt); 1190 cp[IPOPT_OFFSET] += sizeof(struct in_addr); 1191 break; 1192 1193 case IPOPT_TS: 1194 code = cp - (u_char *)ip; 1195 if (optlen < sizeof(struct ip_timestamp)) 1196 goto bad; 1197 memcpy(&ipt, cp, sizeof(struct ip_timestamp)); 1198 if (ipt.ipt_ptr < 5 || ipt.ipt_len < 5) 1199 goto bad; 1200 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) > ipt.ipt_len) { 1201 if (++ipt.ipt_oflw == 0) 1202 goto bad; 1203 break; 1204 } 1205 memcpy(&sin, cp + ipt.ipt_ptr - 1, sizeof sin); 1206 switch (ipt.ipt_flg) { 1207 1208 case IPOPT_TS_TSONLY: 1209 break; 1210 1211 case IPOPT_TS_TSANDADDR: 1212 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) + 1213 sizeof(struct in_addr) > ipt.ipt_len) 1214 goto bad; 1215 memset(&ipaddr, 0, sizeof(ipaddr)); 1216 ipaddr.sin_family = AF_INET; 1217 ipaddr.sin_len = sizeof(ipaddr); 1218 ipaddr.sin_addr = dst; 1219 ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr), 1220 ifp)); 1221 if (ia == NULL) 1222 continue; 1223 memcpy(&sin, &ia->ia_addr.sin_addr, 1224 sizeof(struct in_addr)); 1225 ipt.ipt_ptr += sizeof(struct in_addr); 1226 break; 1227 1228 case IPOPT_TS_PRESPEC: 1229 if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) + 1230 sizeof(struct in_addr) > ipt.ipt_len) 1231 goto bad; 1232 memset(&ipaddr, 0, sizeof(ipaddr)); 1233 ipaddr.sin_family = AF_INET; 1234 ipaddr.sin_len = sizeof(ipaddr); 1235 ipaddr.sin_addr = sin; 1236 if (ifa_ifwithaddr(sintosa(&ipaddr), 1237 m->m_pkthdr.ph_rtableid) == NULL) 1238 continue; 1239 ipt.ipt_ptr += sizeof(struct in_addr); 1240 break; 1241 1242 default: 1243 /* XXX can't take &ipt->ipt_flg */ 1244 code = (u_char *)&ipt.ipt_ptr - 1245 (u_char *)ip + 1; 1246 goto bad; 1247 } 1248 ntime = iptime(); 1249 memcpy(cp + ipt.ipt_ptr - 1, &ntime, sizeof(u_int32_t)); 1250 ipt.ipt_ptr += sizeof(u_int32_t); 1251 } 1252 } 1253 KERNEL_UNLOCK(); 1254 if (forward && ipforwarding) { 1255 ip_forward(m, ifp, NULL, 1); 1256 return (1); 1257 } 1258 return (0); 1259 bad: 1260 KERNEL_UNLOCK(); 1261 icmp_error(m, type, code, 0, 0); 1262 ipstat.ips_badoptions++; 1263 return (1); 1264 } 1265 1266 /* 1267 * Save incoming source route for use in replies, 1268 * to be picked up later by ip_srcroute if the receiver is interested. 1269 */ 1270 void 1271 save_rte(struct mbuf *m, u_char *option, struct in_addr dst) 1272 { 1273 struct ip_srcrt *isr; 1274 struct m_tag *mtag; 1275 unsigned olen; 1276 1277 olen = option[IPOPT_OLEN]; 1278 if (olen > sizeof(isr->isr_hdr) + sizeof(isr->isr_routes)) 1279 return; 1280 1281 mtag = m_tag_get(PACKET_TAG_SRCROUTE, sizeof(*isr), M_NOWAIT); 1282 if (mtag == NULL) 1283 return; 1284 isr = (struct ip_srcrt *)(mtag + 1); 1285 1286 memcpy(isr->isr_hdr, option, olen); 1287 isr->isr_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); 1288 isr->isr_dst = dst; 1289 m_tag_prepend(m, mtag); 1290 } 1291 1292 /* 1293 * Retrieve incoming source route for use in replies, 1294 * in the same form used by setsockopt. 1295 * The first hop is placed before the options, will be removed later. 
1296 */ 1297 struct mbuf * 1298 ip_srcroute(struct mbuf *m0) 1299 { 1300 struct in_addr *p, *q; 1301 struct mbuf *m; 1302 struct ip_srcrt *isr; 1303 struct m_tag *mtag; 1304 1305 if (!ip_dosourceroute) 1306 return (NULL); 1307 1308 mtag = m_tag_find(m0, PACKET_TAG_SRCROUTE, NULL); 1309 if (mtag == NULL) 1310 return (NULL); 1311 isr = (struct ip_srcrt *)(mtag + 1); 1312 1313 if (isr->isr_nhops == 0) 1314 return (NULL); 1315 m = m_get(M_DONTWAIT, MT_SOOPTS); 1316 if (m == NULL) 1317 return (NULL); 1318 1319 #define OPTSIZ (sizeof(isr->isr_nop) + sizeof(isr->isr_hdr)) 1320 1321 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + header) */ 1322 m->m_len = (isr->isr_nhops + 1) * sizeof(struct in_addr) + OPTSIZ; 1323 1324 /* 1325 * First save first hop for return route 1326 */ 1327 p = &(isr->isr_routes[isr->isr_nhops - 1]); 1328 *(mtod(m, struct in_addr *)) = *p--; 1329 1330 /* 1331 * Copy option fields and padding (nop) to mbuf. 1332 */ 1333 isr->isr_nop = IPOPT_NOP; 1334 isr->isr_hdr[IPOPT_OFFSET] = IPOPT_MINOFF; 1335 memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &isr->isr_nop, 1336 OPTSIZ); 1337 q = (struct in_addr *)(mtod(m, caddr_t) + 1338 sizeof(struct in_addr) + OPTSIZ); 1339 #undef OPTSIZ 1340 /* 1341 * Record return path as an IP source route, 1342 * reversing the path (pointers are now aligned). 1343 */ 1344 while (p >= isr->isr_routes) { 1345 *q++ = *p--; 1346 } 1347 /* 1348 * Last hop goes to final destination. 1349 */ 1350 *q = isr->isr_dst; 1351 m_tag_delete(m0, (struct m_tag *)isr); 1352 return (m); 1353 } 1354 1355 /* 1356 * Strip out IP options, at higher level protocol in the kernel. 1357 */ 1358 void 1359 ip_stripoptions(struct mbuf *m) 1360 { 1361 int i; 1362 struct ip *ip = mtod(m, struct ip *); 1363 caddr_t opts; 1364 int olen; 1365 1366 olen = (ip->ip_hl<<2) - sizeof (struct ip); 1367 opts = (caddr_t)(ip + 1); 1368 i = m->m_len - (sizeof (struct ip) + olen); 1369 memmove(opts, opts + olen, i); 1370 m->m_len -= olen; 1371 if (m->m_flags & M_PKTHDR) 1372 m->m_pkthdr.len -= olen; 1373 ip->ip_hl = sizeof(struct ip) >> 2; 1374 ip->ip_len = htons(ntohs(ip->ip_len) - olen); 1375 } 1376 1377 int inetctlerrmap[PRC_NCMDS] = { 1378 0, 0, 0, 0, 1379 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, 1380 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, 1381 EMSGSIZE, EHOSTUNREACH, 0, 0, 1382 0, 0, 0, 0, 1383 ENOPROTOOPT 1384 }; 1385 1386 /* 1387 * Forward a packet. If some error occurs return the sender 1388 * an icmp packet. Note we can't always generate a meaningful 1389 * icmp message because icmp doesn't have a large enough repertoire 1390 * of codes and types. 1391 * 1392 * If not forwarding, just drop the packet. This could be confusing 1393 * if ipforwarding was zero but some routing protocol was advancing 1394 * us as a gateway to somewhere. However, we must let the routing 1395 * protocol deal with that. 1396 * 1397 * The srcrt parameter indicates whether the packet is being forwarded 1398 * via a source route. 
1399 */ 1400 void 1401 ip_forward(struct mbuf *m, struct ifnet *ifp, struct rtentry *rt, int srcrt) 1402 { 1403 struct mbuf mfake, *mcopy = NULL; 1404 struct ip *ip = mtod(m, struct ip *); 1405 struct sockaddr_in *sin; 1406 struct route ro; 1407 int error, type = 0, code = 0, destmtu = 0, fake = 0, len; 1408 u_int32_t dest; 1409 1410 dest = 0; 1411 if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) { 1412 ipstat.ips_cantforward++; 1413 m_freem(m); 1414 goto freecopy; 1415 } 1416 if (ip->ip_ttl <= IPTTLDEC) { 1417 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0); 1418 goto freecopy; 1419 } 1420 1421 sin = satosin(&ro.ro_dst); 1422 memset(sin, 0, sizeof(*sin)); 1423 sin->sin_family = AF_INET; 1424 sin->sin_len = sizeof(*sin); 1425 sin->sin_addr = ip->ip_dst; 1426 1427 if (!rtisvalid(rt)) { 1428 rtfree(rt); 1429 rt = rtalloc_mpath(sintosa(sin), &ip->ip_src.s_addr, 1430 m->m_pkthdr.ph_rtableid); 1431 if (rt == NULL) { 1432 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0); 1433 return; 1434 } 1435 } 1436 1437 /* 1438 * Save at most 68 bytes of the packet in case 1439 * we need to generate an ICMP message to the src. 1440 * The data is saved in the mbuf on the stack that 1441 * acts as a temporary storage not intended to be 1442 * passed down the IP stack or to the mfree. 1443 */ 1444 memset(&mfake.m_hdr, 0, sizeof(mfake.m_hdr)); 1445 mfake.m_type = m->m_type; 1446 if (m_dup_pkthdr(&mfake, m, M_DONTWAIT) == 0) { 1447 mfake.m_data = mfake.m_pktdat; 1448 len = min(ntohs(ip->ip_len), 68); 1449 m_copydata(m, 0, len, mfake.m_pktdat); 1450 mfake.m_pkthdr.len = mfake.m_len = len; 1451 #if NPF > 0 1452 pf_pkt_unlink_state_key(&mfake); 1453 #endif /* NPF > 0 */ 1454 fake = 1; 1455 } 1456 1457 ip->ip_ttl -= IPTTLDEC; 1458 1459 /* 1460 * If forwarding packet using same interface that it came in on, 1461 * perhaps should send a redirect to sender to shortcut a hop. 1462 * Only send redirect if source is sending directly to us, 1463 * and if packet was not source routed (or has any options). 1464 * Also, don't send redirect if forwarding using a default route 1465 * or a route modified by a redirect. 1466 * Don't send redirect if we advertise destination's arp address 1467 * as ours (proxy arp). 1468 */ 1469 if ((rt->rt_ifidx == ifp->if_index) && 1470 (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 && 1471 satosin(rt_key(rt))->sin_addr.s_addr != 0 && 1472 ipsendredirects && !srcrt && 1473 !arpproxy(satosin(rt_key(rt))->sin_addr, m->m_pkthdr.ph_rtableid)) { 1474 if ((ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_netmask) == 1475 ifatoia(rt->rt_ifa)->ia_net) { 1476 if (rt->rt_flags & RTF_GATEWAY) 1477 dest = satosin(rt->rt_gateway)->sin_addr.s_addr; 1478 else 1479 dest = ip->ip_dst.s_addr; 1480 /* Router requirements says to only send host redirects */ 1481 type = ICMP_REDIRECT; 1482 code = ICMP_REDIRECT_HOST; 1483 } 1484 } 1485 1486 ro.ro_rt = rt; 1487 ro.ro_tableid = m->m_pkthdr.ph_rtableid; 1488 error = ip_output(m, NULL, &ro, 1489 (IP_FORWARDING | (ip_directedbcast ? 
IP_ALLOWBROADCAST : 0)), 1490 NULL, NULL, 0); 1491 rt = ro.ro_rt; 1492 if (error) 1493 ipstat.ips_cantforward++; 1494 else { 1495 ipstat.ips_forward++; 1496 if (type) 1497 ipstat.ips_redirectsent++; 1498 else 1499 goto freecopy; 1500 } 1501 if (!fake) 1502 goto freecopy; 1503 1504 switch (error) { 1505 1506 case 0: /* forwarded, but need redirect */ 1507 /* type, code set above */ 1508 break; 1509 1510 case ENETUNREACH: /* shouldn't happen, checked above */ 1511 case EHOSTUNREACH: 1512 case ENETDOWN: 1513 case EHOSTDOWN: 1514 default: 1515 type = ICMP_UNREACH; 1516 code = ICMP_UNREACH_HOST; 1517 break; 1518 1519 case EMSGSIZE: 1520 type = ICMP_UNREACH; 1521 code = ICMP_UNREACH_NEEDFRAG; 1522 1523 #ifdef IPSEC 1524 if (rt != NULL) { 1525 if (rt->rt_rmx.rmx_mtu) 1526 destmtu = rt->rt_rmx.rmx_mtu; 1527 else { 1528 struct ifnet *destifp; 1529 1530 destifp = if_get(rt->rt_ifidx); 1531 if (destifp != NULL) 1532 destmtu = destifp->if_mtu; 1533 if_put(destifp); 1534 } 1535 } 1536 #endif /*IPSEC*/ 1537 ipstat.ips_cantfrag++; 1538 break; 1539 1540 case EACCES: 1541 /* 1542 * pf(4) blocked the packet. There is no need to send an ICMP 1543 * packet back since pf(4) takes care of it. 1544 */ 1545 goto freecopy; 1546 case ENOBUFS: 1547 /* 1548 * a router should not generate ICMP_SOURCEQUENCH as 1549 * required in RFC1812 Requirements for IP Version 4 Routers. 1550 * source quench could be a big problem under DoS attacks, 1551 * or the underlying interface is rate-limited. 1552 */ 1553 goto freecopy; 1554 } 1555 1556 mcopy = m_copym(&mfake, 0, len, M_DONTWAIT); 1557 if (mcopy) 1558 icmp_error(mcopy, type, code, dest, destmtu); 1559 1560 freecopy: 1561 if (fake) 1562 m_tag_delete_chain(&mfake); 1563 rtfree(rt); 1564 } 1565 1566 int 1567 ip_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 1568 size_t newlen) 1569 { 1570 int s, error; 1571 #ifdef MROUTING 1572 extern int ip_mrtproto; 1573 extern struct mrtstat mrtstat; 1574 #endif 1575 1576 /* Almost all sysctl names at this level are terminal. */ 1577 if (namelen != 1 && name[0] != IPCTL_IFQUEUE) 1578 return (ENOTDIR); 1579 1580 switch (name[0]) { 1581 #ifdef notyet 1582 case IPCTL_DEFMTU: 1583 return (sysctl_int(oldp, oldlenp, newp, newlen, &ip_mtu)); 1584 #endif 1585 case IPCTL_SOURCEROUTE: 1586 /* 1587 * Don't allow this to change in a secure environment. 
1588 */ 1589 if (newp && securelevel > 0) 1590 return (EPERM); 1591 return (sysctl_int(oldp, oldlenp, newp, newlen, 1592 &ip_dosourceroute)); 1593 case IPCTL_MTUDISC: 1594 error = sysctl_int(oldp, oldlenp, newp, newlen, 1595 &ip_mtudisc); 1596 if (ip_mtudisc != 0 && ip_mtudisc_timeout_q == NULL) { 1597 ip_mtudisc_timeout_q = 1598 rt_timer_queue_create(ip_mtudisc_timeout); 1599 } else if (ip_mtudisc == 0 && ip_mtudisc_timeout_q != NULL) { 1600 s = splsoftnet(); 1601 rt_timer_queue_destroy(ip_mtudisc_timeout_q); 1602 ip_mtudisc_timeout_q = NULL; 1603 splx(s); 1604 } 1605 return error; 1606 case IPCTL_MTUDISCTIMEOUT: 1607 error = sysctl_int(oldp, oldlenp, newp, newlen, 1608 &ip_mtudisc_timeout); 1609 if (ip_mtudisc_timeout_q != NULL) { 1610 s = splsoftnet(); 1611 rt_timer_queue_change(ip_mtudisc_timeout_q, 1612 ip_mtudisc_timeout); 1613 splx(s); 1614 } 1615 return (error); 1616 case IPCTL_IPSEC_ENC_ALGORITHM: 1617 return (sysctl_tstring(oldp, oldlenp, newp, newlen, 1618 ipsec_def_enc, sizeof(ipsec_def_enc))); 1619 case IPCTL_IPSEC_AUTH_ALGORITHM: 1620 return (sysctl_tstring(oldp, oldlenp, newp, newlen, 1621 ipsec_def_auth, 1622 sizeof(ipsec_def_auth))); 1623 case IPCTL_IPSEC_IPCOMP_ALGORITHM: 1624 return (sysctl_tstring(oldp, oldlenp, newp, newlen, 1625 ipsec_def_comp, 1626 sizeof(ipsec_def_comp))); 1627 case IPCTL_IFQUEUE: 1628 return (sysctl_niq(name + 1, namelen - 1, 1629 oldp, oldlenp, newp, newlen, &ipintrq)); 1630 case IPCTL_STATS: 1631 return (sysctl_rdstruct(oldp, oldlenp, newp, 1632 &ipstat, sizeof(ipstat))); 1633 #ifdef MROUTING 1634 case IPCTL_MRTSTATS: 1635 return (sysctl_rdstruct(oldp, oldlenp, newp, 1636 &mrtstat, sizeof(mrtstat))); 1637 case IPCTL_MRTPROTO: 1638 return (sysctl_rdint(oldp, oldlenp, newp, ip_mrtproto)); 1639 case IPCTL_MRTMFC: 1640 if (newp) 1641 return (EPERM); 1642 return mrt_sysctl_mfc(oldp, oldlenp); 1643 case IPCTL_MRTVIF: 1644 if (newp) 1645 return (EPERM); 1646 return mrt_sysctl_vif(oldp, oldlenp); 1647 #else 1648 case IPCTL_MRTPROTO: 1649 case IPCTL_MRTSTATS: 1650 case IPCTL_MRTMFC: 1651 case IPCTL_MRTVIF: 1652 return (EOPNOTSUPP); 1653 #endif 1654 default: 1655 if (name[0] < IPCTL_MAXID) 1656 return (sysctl_int_arr(ipctl_vars, name, namelen, 1657 oldp, oldlenp, newp, newlen)); 1658 return (EOPNOTSUPP); 1659 } 1660 /* NOTREACHED */ 1661 } 1662 1663 void 1664 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, 1665 struct mbuf *m) 1666 { 1667 #ifdef SO_TIMESTAMP 1668 if (inp->inp_socket->so_options & SO_TIMESTAMP) { 1669 struct timeval tv; 1670 1671 microtime(&tv); 1672 *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv), 1673 SCM_TIMESTAMP, SOL_SOCKET); 1674 if (*mp) 1675 mp = &(*mp)->m_next; 1676 } 1677 #endif 1678 if (inp->inp_flags & INP_RECVDSTADDR) { 1679 *mp = sbcreatecontrol((caddr_t) &ip->ip_dst, 1680 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); 1681 if (*mp) 1682 mp = &(*mp)->m_next; 1683 } 1684 #ifdef notyet 1685 /* this code is broken and will probably never be fixed. 
*/ 1686 /* options were tossed already */ 1687 if (inp->inp_flags & INP_RECVOPTS) { 1688 *mp = sbcreatecontrol((caddr_t) opts_deleted_above, 1689 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); 1690 if (*mp) 1691 mp = &(*mp)->m_next; 1692 } 1693 /* ip_srcroute doesn't do what we want here, need to fix */ 1694 if (inp->inp_flags & INP_RECVRETOPTS) { 1695 *mp = sbcreatecontrol((caddr_t) ip_srcroute(m), 1696 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); 1697 if (*mp) 1698 mp = &(*mp)->m_next; 1699 } 1700 #endif 1701 if (inp->inp_flags & INP_RECVIF) { 1702 struct sockaddr_dl sdl; 1703 struct ifnet *ifp; 1704 1705 ifp = if_get(m->m_pkthdr.ph_ifidx); 1706 if (ifp == NULL || ifp->if_sadl == NULL) { 1707 memset(&sdl, 0, sizeof(sdl)); 1708 sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]); 1709 sdl.sdl_family = AF_LINK; 1710 sdl.sdl_index = ifp != NULL ? ifp->if_index : 0; 1711 sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0; 1712 *mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len, 1713 IP_RECVIF, IPPROTO_IP); 1714 } else { 1715 *mp = sbcreatecontrol((caddr_t) ifp->if_sadl, 1716 ifp->if_sadl->sdl_len, IP_RECVIF, IPPROTO_IP); 1717 } 1718 if (*mp) 1719 mp = &(*mp)->m_next; 1720 if_put(ifp); 1721 } 1722 if (inp->inp_flags & INP_RECVTTL) { 1723 *mp = sbcreatecontrol((caddr_t) &ip->ip_ttl, 1724 sizeof(u_int8_t), IP_RECVTTL, IPPROTO_IP); 1725 if (*mp) 1726 mp = &(*mp)->m_next; 1727 } 1728 if (inp->inp_flags & INP_RECVRTABLE) { 1729 u_int rtableid = inp->inp_rtableid; 1730 #if NPF > 0 1731 struct pf_divert *divert; 1732 1733 if (m && m->m_pkthdr.pf.flags & PF_TAG_DIVERTED && 1734 (divert = pf_find_divert(m)) != NULL) 1735 rtableid = divert->rdomain; 1736 #endif 1737 1738 *mp = sbcreatecontrol((caddr_t) &rtableid, 1739 sizeof(u_int), IP_RECVRTABLE, IPPROTO_IP); 1740 if (*mp) 1741 mp = &(*mp)->m_next; 1742 } 1743 } 1744 1745 void 1746 ip_send_dispatch(void *xmq) 1747 { 1748 struct mbuf_queue *mq = xmq; 1749 struct mbuf *m; 1750 struct mbuf_list ml; 1751 int s; 1752 1753 mq_delist(mq, &ml); 1754 KERNEL_LOCK(); 1755 s = splsoftnet(); 1756 while ((m = ml_dequeue(&ml)) != NULL) { 1757 ip_output(m, NULL, NULL, 0, NULL, NULL, 0); 1758 } 1759 splx(s); 1760 KERNEL_UNLOCK(); 1761 } 1762 1763 void 1764 ip_send(struct mbuf *m) 1765 { 1766 mq_enqueue(&ipsend_mq, m); 1767 task_add(softnettq, &ipsend_task); 1768 } 1769
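
/*
 * Illustrative userland sketch, not part of this kernel source: the
 * protocol-dispatch scheme ip_init() sets up in ip_protox[] and that
 * ip_ours() uses for its final indirect call.  Every protocol number first
 * points at a default "raw" handler and the implemented protocols are then
 * overwritten, so delivery needs no bounds or NULL checks.  The kernel
 * stores indexes into inetsw[] rather than function pointers; this sketch
 * uses pointers directly, and all names and numbers are invented here.
 */
#include <stdio.h>

#define EX_IPPROTO_MAX	256

static void ex_rip_input(unsigned p)  { printf("raw handler, proto %u\n", p); }
static void ex_icmp_input(unsigned p) { printf("icmp handler, proto %u\n", p); }
static void ex_tcp_input(unsigned p)  { printf("tcp handler, proto %u\n", p); }
static void ex_udp_input(unsigned p)  { printf("udp handler, proto %u\n", p); }

static void (*ex_protox[EX_IPPROTO_MAX])(unsigned);

int
main(void)
{
	unsigned i;

	/* default every entry to the raw handler, as ip_init() does */
	for (i = 0; i < EX_IPPROTO_MAX; i++)
		ex_protox[i] = ex_rip_input;

	/* then register the protocols the stack actually implements */
	ex_protox[1] = ex_icmp_input;
	ex_protox[6] = ex_tcp_input;
	ex_protox[17] = ex_udp_input;

	/* dispatch as ip_ours() does: one table lookup, one indirect call */
	ex_protox[6](6);	/* tcp handler */
	ex_protox[41](41);	/* nothing registered: falls to the raw handler */
	return 0;
}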
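
/*
 * Illustrative userland sketch, not part of this kernel source: the header
 * sanity checks and RFC 1071 one's-complement checksum that ipv4_input()
 * applies before a packet is accepted.  The structure layout and all names
 * below are invented for the example.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_iphdr {
	uint8_t  vhl;		/* version in high nibble, header words in low */
	uint8_t  tos;
	uint16_t len;		/* total length, network byte order */
	uint16_t id;
	uint16_t off;
	uint8_t  ttl;
	uint8_t  p;
	uint16_t sum;		/* header checksum */
	uint32_t src, dst;
};

/* One's-complement checksum over the header, the quantity in_cksum() folds. */
static uint16_t
example_cksum(const void *buf, size_t len)
{
	const uint16_t *w = buf;
	uint32_t sum = 0;

	for (; len > 1; len -= 2)
		sum += *w++;
	if (len == 1)
		sum += *(const uint8_t *)w;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int
main(void)
{
	struct example_iphdr h;
	unsigned int hlen;

	memset(&h, 0, sizeof(h));
	h.vhl = (4 << 4) | 5;			/* IPv4, 5 * 4 = 20 byte header */
	h.len = htons(sizeof(h) + 8);		/* header plus an 8 byte payload */
	h.ttl = 64;
	h.p = 17;				/* UDP */
	h.sum = example_cksum(&h, sizeof(h));	/* fill in the checksum last */

	/* the same ordered checks ipv4_input() performs */
	if ((h.vhl >> 4) != 4) {
		puts("bad version");
		return 1;
	}
	hlen = (h.vhl & 0x0f) << 2;
	if (hlen < sizeof(h)) {
		puts("header too short");
		return 1;
	}
	if (example_cksum(&h, hlen) != 0) {
		puts("bad checksum");
		return 1;
	}
	if (ntohs(h.len) < hlen) {
		puts("bad total length");
		return 1;
	}
	puts("header accepted");
	return 0;
}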
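
/*
 * Illustrative sketch, not part of this kernel source: the ECN rule
 * ip_reass() enforces across fragments (the reassembly rule described in
 * RFC 3168): a CE fragment must not be combined with a not-ECT fragment,
 * and once any fragment carries CE the reassembled datagram keeps CE.
 * The helper below is invented for the example and returns -1 when the
 * fragments must be dropped.
 */
#include <stdio.h>

#define EX_ECN_NOTECT	0x00
#define EX_ECN_ECT1	0x01
#define EX_ECN_ECT0	0x02
#define EX_ECN_CE	0x03

/* Combine the ECN bits of the first-arrived fragment with a newcomer. */
static int
ecn_combine(int ecn0, int ecn)
{
	if (ecn == EX_ECN_CE) {
		if (ecn0 == EX_ECN_NOTECT)
			return -1;		/* CE mixed with not-ECT: drop */
		return EX_ECN_CE;		/* do not lose CE */
	}
	if (ecn == EX_ECN_NOTECT && ecn0 != EX_ECN_NOTECT)
		return -1;			/* not-ECT mixed with ECT/CE: drop */
	return ecn0;
}

int
main(void)
{
	printf("%d\n", ecn_combine(EX_ECN_ECT0, EX_ECN_CE));	/* 3: keep CE */
	printf("%d\n", ecn_combine(EX_ECN_NOTECT, EX_ECN_CE));	/* -1: drop */
	return 0;
}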
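
/*
 * Illustrative sketch, not part of this kernel source: the overlap-trimming
 * rules ip_reass() applies when inserting a fragment into a reassembly
 * queue, using a plain sorted array instead of mbufs and ipqent structures.
 * All names are invented for the example; offsets and lengths are in bytes,
 * as they are after ip_ours() has shifted ip_off left by three.
 */
#include <stdio.h>

struct frag { unsigned off, len; int mff; };

#define MAXFRAGS 16
static struct frag q[MAXFRAGS];
static unsigned nfrags;

/* Insert a fragment, trimming any overlap the same way ip_reass() does. */
static void
frag_insert(struct frag f)
{
	unsigned i, j;

	if (nfrags >= MAXFRAGS)
		return;

	/* find the first queued fragment that begins after this one */
	for (i = 0; i < nfrags && q[i].off <= f.off; i++)
		;

	/* a preceding fragment may already provide part (or all) of our data */
	if (i > 0 && q[i - 1].off + q[i - 1].len > f.off) {
		unsigned dup = q[i - 1].off + q[i - 1].len - f.off;

		if (dup >= f.len)
			return;			/* completely covered: drop */
		f.off += dup;
		f.len -= dup;
	}

	/* trim or dequeue following fragments that we overlap */
	while (i < nfrags && f.off + f.len > q[i].off) {
		unsigned dup = f.off + f.len - q[i].off;

		if (dup < q[i].len) {
			q[i].off += dup;
			q[i].len -= dup;
			break;
		}
		for (j = i; j + 1 < nfrags; j++)	/* dequeue q[i] */
			q[j] = q[j + 1];
		nfrags--;
	}

	/* stick the new fragment in its place */
	for (j = nfrags; j > i; j--)
		q[j] = q[j - 1];
	q[i] = f;
	nfrags++;
}

/* Complete when offsets are contiguous from 0 and the last MF bit is clear. */
static int
frag_complete(void)
{
	unsigned i, next = 0;

	for (i = 0; i < nfrags; i++) {
		if (q[i].off != next)
			return 0;
		next += q[i].len;
	}
	return nfrags > 0 && q[nfrags - 1].mff == 0;
}

int
main(void)
{
	frag_insert((struct frag){ 0, 1480, 1 });
	frag_insert((struct frag){ 2960, 520, 0 });
	frag_insert((struct frag){ 1480, 1480, 1 });
	printf("complete: %d\n", frag_complete());
	return 0;
}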
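
/*
 * Illustrative sketch, not part of this kernel source: what ip_srcroute()
 * does with a route recorded by save_rte().  The hop list is walked
 * backwards so a reply retraces the path, with the last recorded hop used
 * as the first hop of the return route and the original source as the
 * final destination.  Names are invented and addresses are plain strings
 * here instead of packed option bytes.
 */
#include <stdio.h>

#define MAXHOPS 9

/* Build the reply route from the hops recorded in the incoming option. */
static unsigned
reverse_route(const char *recorded[], unsigned nhops, const char *src,
    const char *reply[])
{
	unsigned i;

	for (i = 0; i < nhops; i++)	/* last recorded hop becomes first */
		reply[i] = recorded[nhops - 1 - i];
	reply[nhops] = src;		/* final destination: original source */
	return nhops + 1;
}

int
main(void)
{
	const char *recorded[] = { "192.0.2.1", "198.51.100.7", "203.0.113.9" };
	const char *reply[MAXHOPS + 1];
	unsigned i, n;

	n = reverse_route(recorded, 3, "192.0.2.200", reply);
	for (i = 0; i < n; i++)
		printf("hop %u: %s\n", i, reply[i]);
	return 0;
}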