/*	$NetBSD: ip_output.c,v 1.162 2006/05/15 00:05:17 christos Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Public Access Networks Corporation ("Panix").  It was developed under
 * contract to Panix by Eric Haszlakiewicz and Thor Lancelot Simon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_output.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_output.c,v 1.162 2006/05/15 00:05:17 christos Exp $");

#include "opt_pfil_hooks.h"
#include "opt_inet.h"
#include "opt_ipsec.h"
#include "opt_mrouting.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kauth.h>
#ifdef FAST_IPSEC
#include <sys/domain.h>
#endif
#include <sys/systm.h>
#include <sys/proc.h>

#include <net/if.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/in_offload.h>

#ifdef MROUTING
#include <netinet/ip_mroute.h>
#endif

#include <machine/stdarg.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#include <netkey/key_debug.h>
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/key.h>
#include <netipsec/xform.h>
#endif /* FAST_IPSEC*/

#ifdef IPSEC_NAT_T
#include <netinet/udp.h>
#endif

static struct mbuf *ip_insertoptions(struct mbuf *, struct mbuf *, int *);
static struct ifnet *ip_multicast_if(struct in_addr *, int *);
static void ip_mloopback(struct ifnet *, struct mbuf *, struct sockaddr_in *);
static int ip_getoptval(struct mbuf *, u_int8_t *, u_int);

#ifdef PFIL_HOOKS
extern struct pfil_head inet_pfil_hook;			/* XXX */
#endif

int	ip_do_loopback_cksum = 0;

#define	IN_NEED_CHECKSUM(ifp, csum_flags) \
	(__predict_true(((ifp)->if_flags & IFF_LOOPBACK) == 0 || \
	(((csum_flags) & M_CSUM_UDPv4) != 0 && udp_do_loopback_cksum) || \
	(((csum_flags) & M_CSUM_TCPv4) != 0 && tcp_do_loopback_cksum) || \
	(((csum_flags) & M_CSUM_IPv4) != 0 && ip_do_loopback_cksum)))

struct ip_tso_output_args {
	struct ifnet *ifp;
	struct sockaddr *sa;
	struct rtentry *rt;
};

static int ip_tso_output_callback(void *, struct mbuf *);
static int ip_tso_output(struct ifnet *, struct mbuf *, struct sockaddr *,
    struct rtentry *);

static int
ip_tso_output_callback(void *vp, struct mbuf *m)
{
	struct ip_tso_output_args *args = vp;
	struct ifnet *ifp = args->ifp;

	return (*ifp->if_output)(ifp, m, args->sa, args->rt);
}

static int
ip_tso_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
    struct rtentry *rt)
{
	struct ip_tso_output_args args;

	args.ifp = ifp;
	args.sa = sa;
	args.rt = rt;

	return tcp4_segment(m, ip_tso_output_callback, &args);
}

/*
 * IP output.  The packet in mbuf chain m contains a skeletal IP
 * header (with len, off, ttl, proto, tos, src, dst).
 * The mbuf chain containing the packet will be freed.
 * The mbuf opt, if present, will not be freed.
 */
int
ip_output(struct mbuf *m0, ...)
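/*
 * Note (added): the effective argument list, as read back by the
 * va_arg() calls below, is
 *
 *	ip_output(m, opt, ro, flags, imo, so)
 *
 * with one additional "int *" mtu argument that is consumed only when
 * IP_RETURNMTU is set in flags.
 */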
{
	struct ip *ip;
	struct ifnet *ifp;
	struct mbuf *m = m0;
	int hlen = sizeof (struct ip);
	int len, error = 0;
	struct route iproute;
	struct sockaddr_in *dst;
	struct in_ifaddr *ia;
	struct mbuf *opt;
	struct route *ro;
	int flags, sw_csum;
	int *mtu_p;
	u_long mtu;
	struct ip_moptions *imo;
	struct socket *so;
	va_list ap;
#ifdef IPSEC_NAT_T
	int natt_frag = 0;
#endif
#ifdef IPSEC
	struct secpolicy *sp = NULL;
#endif /*IPSEC*/
#ifdef FAST_IPSEC
	struct inpcb *inp;
	struct m_tag *mtag;
	struct secpolicy *sp = NULL;
	struct tdb_ident *tdbi;
	int s;
#endif
	u_int16_t ip_len;

	len = 0;
	va_start(ap, m0);
	opt = va_arg(ap, struct mbuf *);
	ro = va_arg(ap, struct route *);
	flags = va_arg(ap, int);
	imo = va_arg(ap, struct ip_moptions *);
	so = va_arg(ap, struct socket *);
	if (flags & IP_RETURNMTU)
		mtu_p = va_arg(ap, int *);
	else
		mtu_p = NULL;
	va_end(ap);

	MCLAIM(m, &ip_tx_mowner);
#ifdef FAST_IPSEC
	if (so != NULL && so->so_proto->pr_domain->dom_family == AF_INET)
		inp = (struct inpcb *)so->so_pcb;
	else
		inp = NULL;
#endif /* FAST_IPSEC */

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("ip_output no HDR");
#endif
	if (opt) {
		m = ip_insertoptions(m, opt, &len);
		if (len >= sizeof(struct ip))
			hlen = len;
	}
	ip = mtod(m, struct ip *);
	/*
	 * Fill in IP header.
	 */
	if ((flags & (IP_FORWARDING|IP_RAWOUTPUT)) == 0) {
		ip->ip_v = IPVERSION;
		ip->ip_off = htons(0);
		if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0) {
			ip->ip_id = ip_newid();
		} else {

			/*
			 * TSO capable interfaces (typically?) increment
			 * ip_id for each segment.
			 * "Allocate" enough ids here to increase the chance
			 * of them being unique.
			 *
			 * Note that the following calculation does not need
			 * to be precise; wasting some ip_id values is fine.
			 */

			unsigned int segsz = m->m_pkthdr.segsz;
			unsigned int datasz = ntohs(ip->ip_len) - hlen;
			unsigned int num = howmany(datasz, segsz);

			ip->ip_id = ip_newid_range(num);
		}
		ip->ip_hl = hlen >> 2;
		ipstat.ips_localout++;
	} else {
		hlen = ip->ip_hl << 2;
	}
	/*
	 * Route packet.
	 */
	if (ro == 0) {
		ro = &iproute;
		bzero((caddr_t)ro, sizeof (*ro));
	}
	dst = satosin(&ro->ro_dst);
	/*
	 * If there is a cached route,
	 * check that it is to the same destination
	 * and is still up.  If not, free it and try again.
	 * The address family should also be checked in case of sharing the
	 * cache with IPv6.
	 */
	if (ro->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 ||
	    dst->sin_family != AF_INET ||
	    !in_hosteq(dst->sin_addr, ip->ip_dst))) {
		RTFREE(ro->ro_rt);
		ro->ro_rt = (struct rtentry *)0;
	}
	if (ro->ro_rt == 0) {
		bzero(dst, sizeof(*dst));
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = ip->ip_dst;
	}
	/*
	 * If routing to interface only,
	 * short circuit routing lookup.
	 */
	if (flags & IP_ROUTETOIF) {
		if ((ia = ifatoia(ifa_ifwithladdr(sintosa(dst)))) == 0) {
			ipstat.ips_noroute++;
			error = ENETUNREACH;
			goto bad;
		}
		ifp = ia->ia_ifp;
		mtu = ifp->if_mtu;
		ip->ip_ttl = 1;
	} else if ((IN_MULTICAST(ip->ip_dst.s_addr) ||
	    ip->ip_dst.s_addr == INADDR_BROADCAST) &&
	    imo != NULL && imo->imo_multicast_ifp != NULL) {
		ifp = imo->imo_multicast_ifp;
		mtu = ifp->if_mtu;
		IFP_TO_IA(ifp, ia);
	} else {
		if (ro->ro_rt == 0)
			rtalloc(ro);
		if (ro->ro_rt == 0) {
			ipstat.ips_noroute++;
			error = EHOSTUNREACH;
			goto bad;
		}
		ia = ifatoia(ro->ro_rt->rt_ifa);
		ifp = ro->ro_rt->rt_ifp;
		if ((mtu = ro->ro_rt->rt_rmx.rmx_mtu) == 0)
			mtu = ifp->if_mtu;
		ro->ro_rt->rt_use++;
		if (ro->ro_rt->rt_flags & RTF_GATEWAY)
			dst = satosin(ro->ro_rt->rt_gateway);
	}
	if (IN_MULTICAST(ip->ip_dst.s_addr) ||
	    (ip->ip_dst.s_addr == INADDR_BROADCAST)) {
		struct in_multi *inm;

		m->m_flags |= (ip->ip_dst.s_addr == INADDR_BROADCAST) ?
		    M_BCAST : M_MCAST;
		/*
		 * IP destination address is multicast.  Make sure "dst"
		 * still points to the address in "ro".  (It may have been
		 * changed to point to a gateway address, above.)
		 */
		dst = satosin(&ro->ro_dst);
		/*
		 * See if the caller provided any multicast options
		 */
		if (imo != NULL)
			ip->ip_ttl = imo->imo_multicast_ttl;
		else
			ip->ip_ttl = IP_DEFAULT_MULTICAST_TTL;

		/*
		 * if we don't know the outgoing ifp yet, we can't generate
		 * output
		 */
		if (!ifp) {
			ipstat.ips_noroute++;
			error = ENETUNREACH;
			goto bad;
		}

		/*
		 * If the packet is multicast or broadcast, confirm that
		 * the outgoing interface can transmit it.
		 */
		if (((m->m_flags & M_MCAST) &&
		    (ifp->if_flags & IFF_MULTICAST) == 0) ||
		    ((m->m_flags & M_BCAST) &&
		    (ifp->if_flags & (IFF_BROADCAST|IFF_POINTOPOINT)) == 0)) {
			ipstat.ips_noroute++;
			error = ENETUNREACH;
			goto bad;
		}
		/*
		 * If source address not specified yet, use an address
		 * of outgoing interface.
		 */
		if (in_nullhost(ip->ip_src)) {
			struct in_ifaddr *xia;

			IFP_TO_IA(ifp, xia);
			if (!xia) {
				error = EADDRNOTAVAIL;
				goto bad;
			}
			ip->ip_src = xia->ia_addr.sin_addr;
		}

		IN_LOOKUP_MULTI(ip->ip_dst, ifp, inm);
		if (inm != NULL &&
		    (imo == NULL || imo->imo_multicast_loop)) {
			/*
			 * If we belong to the destination multicast group
			 * on the outgoing interface, and the caller did not
			 * forbid loopback, loop back a copy.
			 */
			ip_mloopback(ifp, m, dst);
		}
#ifdef MROUTING
		else {
			/*
			 * If we are acting as a multicast router, perform
			 * multicast forwarding as if the packet had just
			 * arrived on the interface to which we are about
			 * to send.  The multicast forwarding function
			 * recursively calls this function, using the
			 * IP_FORWARDING flag to prevent infinite recursion.
			 *
			 * Multicasts that are looped back by ip_mloopback(),
			 * above, will be forwarded by the ip_input() routine,
			 * if necessary.
			 */
			extern struct socket *ip_mrouter;

			if (ip_mrouter && (flags & IP_FORWARDING) == 0) {
				if (ip_mforward(m, ifp) != 0) {
					m_freem(m);
					goto done;
				}
			}
		}
#endif
		/*
		 * Multicasts with a time-to-live of zero may be looped-
		 * back, above, but must not be transmitted on a network.
		 * Also, multicasts addressed to the loopback interface
		 * are not sent -- the above call to ip_mloopback() will
		 * loop back a copy if this host actually belongs to the
		 * destination group on the loopback interface.
		 */
		if (ip->ip_ttl == 0 || (ifp->if_flags & IFF_LOOPBACK) != 0) {
			m_freem(m);
			goto done;
		}

		goto sendit;
	}
	/*
	 * If source address not specified yet, use address
	 * of outgoing interface.
	 */
	if (in_nullhost(ip->ip_src))
		ip->ip_src = ia->ia_addr.sin_addr;

	/*
	 * packets with Class-D address as source are not valid per
	 * RFC 1112
	 */
	if (IN_MULTICAST(ip->ip_src.s_addr)) {
		ipstat.ips_odropped++;
		error = EADDRNOTAVAIL;
		goto bad;
	}

	/*
	 * Look for broadcast address and verify the user is allowed
	 * to send such a packet.
	 */
	if (in_broadcast(dst->sin_addr, ifp)) {
		if ((ifp->if_flags & IFF_BROADCAST) == 0) {
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if ((flags & IP_ALLOWBROADCAST) == 0) {
			error = EACCES;
			goto bad;
		}
		/* don't allow broadcast messages to be fragmented */
		if (ntohs(ip->ip_len) > ifp->if_mtu) {
			error = EMSGSIZE;
			goto bad;
		}
		m->m_flags |= M_BCAST;
	} else
		m->m_flags &= ~M_BCAST;

sendit:
	/*
	 * If we're doing Path MTU Discovery, we need to set DF unless
	 * the route's MTU is locked.
	 */
	if ((flags & IP_MTUDISC) != 0 && ro->ro_rt != NULL &&
	    (ro->ro_rt->rt_rmx.rmx_locks & RTV_MTU) == 0)
		ip->ip_off |= htons(IP_DF);

	/* Remember the current ip_len */
	ip_len = ntohs(ip->ip_len);

#ifdef IPSEC
	/* get SP for this packet */
	if (so == NULL)
		sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND,
		    flags, &error);
	else {
		if (IPSEC_PCB_SKIP_IPSEC(sotoinpcb_hdr(so)->inph_sp,
		    IPSEC_DIR_OUTBOUND))
			goto skip_ipsec;
		sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error);
	}

	if (sp == NULL) {
		ipsecstat.out_inval++;
		goto bad;
	}

	error = 0;

	/* check policy */
	switch (sp->policy) {
	case IPSEC_POLICY_DISCARD:
		/*
		 * This packet is just discarded.
		 */
		ipsecstat.out_polvio++;
		goto bad;

	case IPSEC_POLICY_BYPASS:
	case IPSEC_POLICY_NONE:
		/* no need to do IPsec. */
		goto skip_ipsec;

	case IPSEC_POLICY_IPSEC:
		if (sp->req == NULL) {
			/* XXX should be panic ? */
			printf("ip_output: No IPsec request specified.\n");
			error = EINVAL;
			goto bad;
		}
		break;

	case IPSEC_POLICY_ENTRUST:
	default:
		printf("ip_output: Invalid policy found. %d\n", sp->policy);
	}

#ifdef IPSEC_NAT_T
	/*
	 * NAT-T ESP fragmentation: don't do IPSec processing now,
	 * we'll do it on each fragmented packet.
	 */
	if (sp->req->sav &&
	    ((sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP) ||
	    (sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP_NON_IKE))) {
		if (ntohs(ip->ip_len) > sp->req->sav->esp_frag) {
			natt_frag = 1;
			mtu = sp->req->sav->esp_frag;
			goto skip_ipsec;
		}
	}
#endif /* IPSEC_NAT_T */

	/*
	 * ipsec4_output() expects ip_len and ip_off in network
	 * order.  They have been set to network order above.
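	 *
	 * (In this file ip_len and ip_off are kept in network byte order
	 * throughout; individual comparisons and updates convert with
	 * ntohs()/htons() at the point of use.)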
	 */

    {
	struct ipsec_output_state state;
	bzero(&state, sizeof(state));
	state.m = m;
	if (flags & IP_ROUTETOIF) {
		state.ro = &iproute;
		bzero(&iproute, sizeof(iproute));
	} else
		state.ro = ro;
	state.dst = (struct sockaddr *)dst;

	/*
	 * We can't defer the checksum of payload data if
	 * we're about to encrypt/authenticate it.
	 *
	 * XXX When we support crypto offloading functions of
	 * XXX network interfaces, we need to reconsider this,
	 * XXX since it's likely that they'll support checksumming,
	 * XXX as well.
	 */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		in_delayed_cksum(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
	}

	error = ipsec4_output(&state, sp, flags);

	m = state.m;
	if (flags & IP_ROUTETOIF) {
		/*
		 * if we have tunnel mode SA, we may need to ignore
		 * IP_ROUTETOIF.
		 */
		if (state.ro != &iproute || state.ro->ro_rt != NULL) {
			flags &= ~IP_ROUTETOIF;
			ro = state.ro;
		}
	} else
		ro = state.ro;
	dst = (struct sockaddr_in *)state.dst;
	if (error) {
		/* mbuf is already reclaimed in ipsec4_output. */
		m0 = NULL;
		switch (error) {
		case EHOSTUNREACH:
		case ENETUNREACH:
		case EMSGSIZE:
		case ENOBUFS:
		case ENOMEM:
			break;
		default:
			printf("ip4_output (ipsec): error code %d\n", error);
			/*fall through*/
		case ENOENT:
			/* don't show these error codes to the user */
			error = 0;
			break;
		}
		goto bad;
	}

	/* be sure to update variables that are affected by ipsec4_output() */
	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
	ip_len = ntohs(ip->ip_len);

	if (ro->ro_rt == NULL) {
		if ((flags & IP_ROUTETOIF) == 0) {
			printf("ip_output: "
			    "can't update route after IPsec processing\n");
			error = EHOSTUNREACH;	/*XXX*/
			goto bad;
		}
	} else {
		/* nobody uses ia beyond here */
		if (state.encap) {
			ifp = ro->ro_rt->rt_ifp;
			if ((mtu = ro->ro_rt->rt_rmx.rmx_mtu) == 0)
				mtu = ifp->if_mtu;
		}
	}
    }
skip_ipsec:
#endif /*IPSEC*/
#ifdef FAST_IPSEC
	/*
	 * Check the security policy (SP) for the packet and, if
	 * required, do IPsec-related processing.  There are two
	 * cases here; the first time a packet is sent through
	 * it will be untagged and handled by ipsec4_checkpolicy.
	 * If the packet is resubmitted to ip_output (e.g. after
	 * AH, ESP, etc. processing), there will be a tag to bypass
	 * the lookup and related policy checking.
	 */
	mtag = m_tag_find(m, PACKET_TAG_IPSEC_PENDING_TDB, NULL);
	s = splsoftnet();
	if (mtag != NULL) {
		tdbi = (struct tdb_ident *)(mtag + 1);
		sp = ipsec_getpolicy(tdbi, IPSEC_DIR_OUTBOUND);
		if (sp == NULL)
			error = -EINVAL;	/* force silent drop */
		m_tag_delete(m, mtag);
	} else {
		if (inp != NULL &&
		    IPSEC_PCB_SKIP_IPSEC(inp->inp_sp, IPSEC_DIR_OUTBOUND))
			goto spd_done;
		sp = ipsec4_checkpolicy(m, IPSEC_DIR_OUTBOUND, flags,
		    &error, inp);
	}
	/*
	 * There are four return cases:
	 *	sp != NULL			apply IPsec policy
	 *	sp == NULL, error == 0		no IPsec handling needed
	 *	sp == NULL, error == -EINVAL	discard packet w/o error
	 *	sp == NULL, error != 0		discard packet, report error
	 */
	if (sp != NULL) {
#ifdef IPSEC_NAT_T
		/*
		 * NAT-T ESP fragmentation: don't do IPSec processing now,
		 * we'll do it on each fragmented packet.
		 */
		if (sp->req->sav &&
		    ((sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP) ||
		    (sp->req->sav->natt_type & UDP_ENCAP_ESPINUDP_NON_IKE))) {
			if (ntohs(ip->ip_len) > sp->req->sav->esp_frag) {
				natt_frag = 1;
				mtu = sp->req->sav->esp_frag;
				goto spd_done;
			}
		}
#endif /* IPSEC_NAT_T */
		/* Loop detection, check if ipsec processing already done */
		IPSEC_ASSERT(sp->req != NULL, ("ip_output: no ipsec request"));
		for (mtag = m_tag_first(m); mtag != NULL;
		     mtag = m_tag_next(m, mtag)) {
#ifdef MTAG_ABI_COMPAT
			if (mtag->m_tag_cookie != MTAG_ABI_COMPAT)
				continue;
#endif
			if (mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_DONE &&
			    mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED)
				continue;
			/*
			 * Check if policy has an SA associated with it.
			 * This can happen when an SP has yet to acquire
			 * an SA; e.g. on first reference.  If it occurs,
			 * then we let ipsec4_process_packet do its thing.
			 */
			if (sp->req->sav == NULL)
				break;
			tdbi = (struct tdb_ident *)(mtag + 1);
			if (tdbi->spi == sp->req->sav->spi &&
			    tdbi->proto == sp->req->sav->sah->saidx.proto &&
			    bcmp(&tdbi->dst, &sp->req->sav->sah->saidx.dst,
				sizeof (union sockaddr_union)) == 0) {
				/*
				 * No IPsec processing is needed, free
				 * reference to SP.
				 *
				 * NB: null pointer to avoid free at
				 *     done: below.
				 */
				KEY_FREESP(&sp), sp = NULL;
				splx(s);
				goto spd_done;
			}
		}

		/*
		 * Do delayed checksums now because we send before
		 * this is done in the normal processing path.
		 */
		if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}

#ifdef __FreeBSD__
		ip->ip_len = htons(ip->ip_len);
		ip->ip_off = htons(ip->ip_off);
#endif

		/* NB: callee frees mbuf */
		error = ipsec4_process_packet(m, sp->req, flags, 0);
		/*
		 * Preserve KAME behaviour: ENOENT can be returned
		 * when an SA acquire is in progress.  Don't propagate
		 * this to user-level; it confuses applications.
		 *
		 * XXX this will go away when the SADB is redone.
		 */
		if (error == ENOENT)
			error = 0;
		splx(s);
		goto done;
	} else {
		splx(s);

		if (error != 0) {
			/*
			 * Hack: -EINVAL is used to signal that a packet
			 * should be silently discarded.  This is typically
			 * because we asked key management for an SA and
			 * it was delayed (e.g. kicked up to IKE).
			 */
			if (error == -EINVAL)
				error = 0;
			goto bad;
		} else {
			/* No IPsec processing for this packet. */
		}
#ifdef notyet
		/*
		 * If deferred crypto processing is needed, check that
		 * the interface supports it.
		 */
		mtag = m_tag_find(m, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED, NULL);
		if (mtag != NULL && (ifp->if_capenable & IFCAP_IPSEC) == 0) {
			/* notify IPsec to do its own crypto */
			ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
			error = EHOSTUNREACH;
			goto bad;
		}
#endif
	}
spd_done:
#endif /* FAST_IPSEC */

#ifdef PFIL_HOOKS
	/*
	 * Run through list of hooks for output packets.
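	 * Packet filters (e.g. ipf or pf) attach to inet_pfil_hook; a hook
	 * may modify the packet or consume it entirely, so both outcomes
	 * are checked for below and the header pointer is reloaded.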
	 */
	if ((error = pfil_run_hooks(&inet_pfil_hook, &m, ifp, PFIL_OUT)) != 0)
		goto done;
	if (m == NULL)
		goto done;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
#endif /* PFIL_HOOKS */

	m->m_pkthdr.csum_data |= hlen << 16;

#if IFA_STATS
	/*
	 * search for the source address structure to
	 * maintain output statistics.
	 */
	INADDR_TO_IA(ip->ip_src, ia);
#endif

	/* Maybe skip checksums on loopback interfaces. */
	if (IN_NEED_CHECKSUM(ifp, M_CSUM_IPv4)) {
		m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
	}
	sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_csum_flags_tx;
	/*
	 * If small enough for mtu of path, or if using TCP segmentation
	 * offload, can just send directly.
	 */
	if (ip_len <= mtu ||
	    (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) {
#if IFA_STATS
		if (ia)
			ia->ia_ifa.ifa_data.ifad_outbytes += ip_len;
#endif
		/*
		 * Always initialize the sum to 0!  Some HW assisted
		 * checksumming requires this.
		 */
		ip->ip_sum = 0;

		if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0) {
			/*
			 * Perform any checksums that the hardware can't do
			 * for us.
			 *
			 * XXX Does any hardware require the {th,uh}_sum
			 * XXX fields to be 0?
			 */
			if (sw_csum & M_CSUM_IPv4) {
				KASSERT(IN_NEED_CHECKSUM(ifp, M_CSUM_IPv4));
				ip->ip_sum = in_cksum(m, hlen);
				m->m_pkthdr.csum_flags &= ~M_CSUM_IPv4;
			}
			if (sw_csum & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
				if (IN_NEED_CHECKSUM(ifp,
				    sw_csum & (M_CSUM_TCPv4|M_CSUM_UDPv4))) {
					in_delayed_cksum(m);
				}
				m->m_pkthdr.csum_flags &=
				    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
			}
		}

#ifdef IPSEC
		/* clean ipsec history once it goes out of the node */
		ipsec_delaux(m);
#endif

		if (__predict_true(
		    (m->m_pkthdr.csum_flags & M_CSUM_TSOv4) == 0 ||
		    (ifp->if_capenable & IFCAP_TSOv4) != 0)) {
			error =
			    (*ifp->if_output)(ifp, m, sintosa(dst), ro->ro_rt);
		} else {
			error =
			    ip_tso_output(ifp, m, sintosa(dst), ro->ro_rt);
		}
		goto done;
	}

	/*
	 * We can't use HW checksumming if we're about to
	 * fragment the packet.
	 *
	 * XXX Some hardware can do this.
	 */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		if (IN_NEED_CHECKSUM(ifp,
		    m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))) {
			in_delayed_cksum(m);
		}
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
	}

	/*
	 * Too large for interface; fragment if possible.
	 * Must be able to put at least 8 bytes per fragment.
	 */
	if (ntohs(ip->ip_off) & IP_DF) {
		if (flags & IP_RETURNMTU)
			*mtu_p = mtu;
		error = EMSGSIZE;
		ipstat.ips_cantfrag++;
		goto bad;
	}

	error = ip_fragment(m, ifp, mtu);
	if (error) {
		m = NULL;
		goto bad;
	}

	for (; m; m = m0) {
		m0 = m->m_nextpkt;
		m->m_nextpkt = 0;
		if (error == 0) {
#if IFA_STATS
			if (ia)
				ia->ia_ifa.ifa_data.ifad_outbytes +=
				    ntohs(ip->ip_len);
#endif
#ifdef IPSEC
			/* clean ipsec history once it goes out of the node */
			ipsec_delaux(m);
#endif /* IPSEC */

#ifdef IPSEC_NAT_T
			/*
			 * If we get here, the packet has not been handled by
			 * IPsec when it should have been.  Now that it has
			 * been fragmented, re-inject it into ip_output() so
			 * that IPsec processing can occur.
			 */
			if (natt_frag) {
				error = ip_output(m, opt,
				    ro, flags, imo, so, mtu_p);
			} else
#endif /* IPSEC_NAT_T */
			{
				KASSERT((m->m_pkthdr.csum_flags &
				    (M_CSUM_UDPv4 | M_CSUM_TCPv4)) == 0);
				error = (*ifp->if_output)(ifp, m, sintosa(dst),
				    ro->ro_rt);
			}
		} else
			m_freem(m);
	}

	if (error == 0)
		ipstat.ips_fragmented++;
done:
	if (ro == &iproute && (flags & IP_ROUTETOIF) == 0 && ro->ro_rt) {
		RTFREE(ro->ro_rt);
		ro->ro_rt = 0;
	}

#ifdef IPSEC
	if (sp != NULL) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
			printf("DP ip_output call free SP:%p\n", sp));
		key_freesp(sp);
	}
#endif /* IPSEC */
#ifdef FAST_IPSEC
	if (sp != NULL)
		KEY_FREESP(&sp);
#endif /* FAST_IPSEC */

	return (error);
bad:
	m_freem(m);
	goto done;
}

int
ip_fragment(struct mbuf *m, struct ifnet *ifp, u_long mtu)
{
	struct ip *ip, *mhip;
	struct mbuf *m0;
	int len, hlen, off;
	int mhlen, firstlen;
	struct mbuf **mnext;
	int sw_csum = m->m_pkthdr.csum_flags;
	int fragments = 0;
	int s;
	int error = 0;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
	if (ifp != NULL)
		sw_csum &= ~ifp->if_csum_flags_tx;

	len = (mtu - hlen) &~ 7;
	if (len < 8) {
		m_freem(m);
		return (EMSGSIZE);
	}

	firstlen = len;
	mnext = &m->m_nextpkt;

	/*
	 * Loop through length of segment after first fragment,
	 * make new header and copy data of each part and link onto chain.
	 */
	m0 = m;
	mhlen = sizeof (struct ip);
	for (off = hlen + len; off < ntohs(ip->ip_len); off += len) {
		MGETHDR(m, M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			error = ENOBUFS;
			ipstat.ips_odropped++;
			goto sendorfree;
		}
		MCLAIM(m, m0->m_owner);
		*mnext = m;
		mnext = &m->m_nextpkt;
		m->m_data += max_linkhdr;
		mhip = mtod(m, struct ip *);
		*mhip = *ip;
		/* we must inherit MCAST and BCAST flags */
		m->m_flags |= m0->m_flags & (M_MCAST|M_BCAST);
		if (hlen > sizeof (struct ip)) {
			mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
			mhip->ip_hl = mhlen >> 2;
		}
		m->m_len = mhlen;
		mhip->ip_off = ((off - hlen) >> 3) +
		    (ntohs(ip->ip_off) & ~IP_MF);
		if (ip->ip_off & htons(IP_MF))
			mhip->ip_off |= IP_MF;
		if (off + len >= ntohs(ip->ip_len))
			len = ntohs(ip->ip_len) - off;
		else
			mhip->ip_off |= IP_MF;
		HTONS(mhip->ip_off);
		mhip->ip_len = htons((u_int16_t)(len + mhlen));
		m->m_next = m_copy(m0, off, len);
		if (m->m_next == 0) {
			error = ENOBUFS;	/* ??? */
			ipstat.ips_odropped++;
			goto sendorfree;
		}
		m->m_pkthdr.len = mhlen + len;
		m->m_pkthdr.rcvif = (struct ifnet *)0;
		mhip->ip_sum = 0;
		if (sw_csum & M_CSUM_IPv4) {
			mhip->ip_sum = in_cksum(m, mhlen);
			KASSERT((m->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0);
		} else {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			m->m_pkthdr.csum_data |= mhlen << 16;
		}
		ipstat.ips_ofragments++;
		fragments++;
	}
	/*
	 * Update first fragment by trimming what's been copied out
	 * and updating header, then send each fragment (in order).
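	 * (The first fragment keeps the complete option set; the later
	 * fragments built above carry only the options marked as copied,
	 * see ip_optcopy().)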
	 */
	m = m0;
	m_adj(m, hlen + firstlen - ntohs(ip->ip_len));
	m->m_pkthdr.len = hlen + firstlen;
	ip->ip_len = htons((u_int16_t)m->m_pkthdr.len);
	ip->ip_off |= htons(IP_MF);
	ip->ip_sum = 0;
	if (sw_csum & M_CSUM_IPv4) {
		ip->ip_sum = in_cksum(m, hlen);
		m->m_pkthdr.csum_flags &= ~M_CSUM_IPv4;
	} else {
		KASSERT(m->m_pkthdr.csum_flags & M_CSUM_IPv4);
		KASSERT(M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) >=
		    sizeof(struct ip));
	}
sendorfree:
	/*
	 * If there is no room for all the fragments, don't queue
	 * any of them.
	 */
	if (ifp != NULL) {
		s = splnet();
		if (ifp->if_snd.ifq_maxlen - ifp->if_snd.ifq_len < fragments &&
		    error == 0) {
			error = ENOBUFS;
			ipstat.ips_odropped++;
			IFQ_INC_DROPS(&ifp->if_snd);
		}
		splx(s);
	}
	if (error) {
		for (m = m0; m; m = m0) {
			m0 = m->m_nextpkt;
			m->m_nextpkt = NULL;
			m_freem(m);
		}
	}
	return (error);
}

/*
 * Process a delayed payload checksum calculation.
 */
void
in_delayed_cksum(struct mbuf *m)
{
	struct ip *ip;
	u_int16_t csum, offset;

	ip = mtod(m, struct ip *);
	offset = ip->ip_hl << 2;
	csum = in4_cksum(m, 0, offset, ntohs(ip->ip_len) - offset);
	if (csum == 0 && (m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
		csum = 0xffff;

	offset += M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);

	if ((offset + sizeof(u_int16_t)) > m->m_len) {
		/* This happens when IP options were inserted
		printf("in_delayed_cksum: pullup len %d off %d proto %d\n",
		    m->m_len, offset, ip->ip_p);
		 */
		m_copyback(m, offset, sizeof(csum), (caddr_t) &csum);
	} else
		*(u_int16_t *)(mtod(m, caddr_t) + offset) = csum;
}

/*
 * Determine the maximum length of the options to be inserted;
 * we would far rather allocate too much space than too little.
 */

u_int
ip_optlen(struct inpcb *inp)
{
	struct mbuf *m = inp->inp_options;

	if (m && m->m_len > offsetof(struct ipoption, ipopt_dst))
		return (m->m_len - offsetof(struct ipoption, ipopt_dst));
	else
		return 0;
}


/*
 * Insert IP options into preformed packet.
 * Adjust IP destination as required for IP source routing,
 * as indicated by a non-zero in_addr at the start of the options.
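 *
 * (The option mbuf is laid out as a struct ipoption: the first-hop
 * address ipopt_dst followed by the raw option bytes in ipopt_list;
 * only the latter are copied into the outgoing header.)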
 */
static struct mbuf *
ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen)
{
	struct ipoption *p = mtod(opt, struct ipoption *);
	struct mbuf *n;
	struct ip *ip = mtod(m, struct ip *);
	unsigned optlen;

	optlen = opt->m_len - sizeof(p->ipopt_dst);
	if (optlen + ntohs(ip->ip_len) > IP_MAXPACKET)
		return (m);		/* XXX should fail */
	if (!in_nullhost(p->ipopt_dst))
		ip->ip_dst = p->ipopt_dst;
	if (M_READONLY(m) || M_LEADINGSPACE(m) < optlen) {
		MGETHDR(n, M_DONTWAIT, MT_HEADER);
		if (n == 0)
			return (m);
		MCLAIM(n, m->m_owner);
		M_MOVE_PKTHDR(n, m);
		m->m_len -= sizeof(struct ip);
		m->m_data += sizeof(struct ip);
		n->m_next = m;
		m = n;
		m->m_len = optlen + sizeof(struct ip);
		m->m_data += max_linkhdr;
		bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
	} else {
		m->m_data -= optlen;
		m->m_len += optlen;
		memmove(mtod(m, caddr_t), ip, sizeof(struct ip));
	}
	m->m_pkthdr.len += optlen;
	ip = mtod(m, struct ip *);
	bcopy((caddr_t)p->ipopt_list, (caddr_t)(ip + 1), (unsigned)optlen);
	*phlen = sizeof(struct ip) + optlen;
	ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
	return (m);
}

/*
 * Copy options from ip to jp,
 * omitting those not copied during fragmentation.
 */
int
ip_optcopy(struct ip *ip, struct ip *jp)
{
	u_char *cp, *dp;
	int opt, optlen, cnt;

	cp = (u_char *)(ip + 1);
	dp = (u_char *)(jp + 1);
	cnt = (ip->ip_hl << 2) - sizeof (struct ip);
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP) {
			/* Preserve for IP mcast tunnel's LSRR alignment. */
			*dp++ = IPOPT_NOP;
			optlen = 1;
			continue;
		}
#ifdef DIAGNOSTIC
		if (cnt < IPOPT_OLEN + sizeof(*cp))
			panic("malformed IPv4 option passed to ip_optcopy");
#endif
		optlen = cp[IPOPT_OLEN];
#ifdef DIAGNOSTIC
		if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt)
			panic("malformed IPv4 option passed to ip_optcopy");
#endif
		/* bogus lengths should have been caught by ip_dooptions */
		if (optlen > cnt)
			optlen = cnt;
		if (IPOPT_COPIED(opt)) {
			bcopy((caddr_t)cp, (caddr_t)dp, (unsigned)optlen);
			dp += optlen;
		}
	}
	for (optlen = dp - (u_char *)(jp+1); optlen & 0x3; optlen++)
		*dp++ = IPOPT_EOL;
	return (optlen);
}

/*
 * IP socket option processing.
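 *
 * Typically reached through a protocol's pr_ctloutput entry: for example
 * a setsockopt(s, IPPROTO_IP, IP_TOS, ...) call arrives here with
 * op == PRCO_SETOPT and the option value in *mp.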
 */
int
ip_ctloutput(int op, struct socket *so, int level, int optname,
    struct mbuf **mp)
{
	struct inpcb *inp = sotoinpcb(so);
	struct mbuf *m = *mp;
	int optval = 0;
	int error = 0;
#if defined(IPSEC) || defined(FAST_IPSEC)
	struct proc *p = curproc;	/*XXX*/
#endif

	if (level != IPPROTO_IP) {
		error = EINVAL;
		if (op == PRCO_SETOPT && *mp)
			(void) m_free(*mp);
	} else switch (op) {

	case PRCO_SETOPT:
		switch (optname) {
		case IP_OPTIONS:
#ifdef notyet
		case IP_RETOPTS:
			return (ip_pcbopts(optname, &inp->inp_options, m));
#else
			return (ip_pcbopts(&inp->inp_options, m));
#endif

		case IP_TOS:
		case IP_TTL:
		case IP_RECVOPTS:
		case IP_RECVRETOPTS:
		case IP_RECVDSTADDR:
		case IP_RECVIF:
			if (m == NULL || m->m_len != sizeof(int))
				error = EINVAL;
			else {
				optval = *mtod(m, int *);
				switch (optname) {

				case IP_TOS:
					inp->inp_ip.ip_tos = optval;
					break;

				case IP_TTL:
					inp->inp_ip.ip_ttl = optval;
					break;
#define	OPTSET(bit) \
	if (optval) \
		inp->inp_flags |= bit; \
	else \
		inp->inp_flags &= ~bit;

				case IP_RECVOPTS:
					OPTSET(INP_RECVOPTS);
					break;

				case IP_RECVRETOPTS:
					OPTSET(INP_RECVRETOPTS);
					break;

				case IP_RECVDSTADDR:
					OPTSET(INP_RECVDSTADDR);
					break;

				case IP_RECVIF:
					OPTSET(INP_RECVIF);
					break;
				}
			}
			break;
#undef OPTSET

		case IP_MULTICAST_IF:
		case IP_MULTICAST_TTL:
		case IP_MULTICAST_LOOP:
		case IP_ADD_MEMBERSHIP:
		case IP_DROP_MEMBERSHIP:
			error = ip_setmoptions(optname, &inp->inp_moptions, m);
			break;

		case IP_PORTRANGE:
			if (m == 0 || m->m_len != sizeof(int))
				error = EINVAL;
			else {
				optval = *mtod(m, int *);

				switch (optval) {

				case IP_PORTRANGE_DEFAULT:
				case IP_PORTRANGE_HIGH:
					inp->inp_flags &= ~(INP_LOWPORT);
					break;

				case IP_PORTRANGE_LOW:
					inp->inp_flags |= INP_LOWPORT;
					break;

				default:
					error = EINVAL;
					break;
				}
			}
			break;

#if defined(IPSEC) || defined(FAST_IPSEC)
		case IP_IPSEC_POLICY:
		{
			caddr_t req = NULL;
			size_t len = 0;
			int priv = 0;

#ifdef __NetBSD__
			if (p == 0 || kauth_authorize_generic(p->p_cred,
			    KAUTH_GENERIC_ISSUSER, &p->p_acflag))
				priv = 0;
			else
				priv = 1;
#else
			priv = (in6p->in6p_socket->so_state & SS_PRIV);
#endif
			if (m) {
				req = mtod(m, caddr_t);
				len = m->m_len;
			}
			error = ipsec4_set_policy(inp, optname, req, len, priv);
			break;
		}
#endif /*IPSEC*/

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (m)
			(void)m_free(m);
		break;

	case PRCO_GETOPT:
		switch (optname) {
		case IP_OPTIONS:
		case IP_RETOPTS:
			*mp = m = m_get(M_WAIT, MT_SOOPTS);
			MCLAIM(m, so->so_mowner);
			if (inp->inp_options) {
				m->m_len = inp->inp_options->m_len;
				bcopy(mtod(inp->inp_options, caddr_t),
				    mtod(m, caddr_t), (unsigned)m->m_len);
			} else
				m->m_len = 0;
			break;

		case IP_TOS:
		case IP_TTL:
		case IP_RECVOPTS:
		case IP_RECVRETOPTS:
		case IP_RECVDSTADDR:
		case IP_RECVIF:
		case IP_ERRORMTU:
			*mp = m = m_get(M_WAIT, MT_SOOPTS);
			MCLAIM(m, so->so_mowner);
			m->m_len = sizeof(int);
			switch (optname) {

			case IP_TOS:
				optval = inp->inp_ip.ip_tos;
				break;

			case IP_TTL:
				optval = inp->inp_ip.ip_ttl;
				break;

			case IP_ERRORMTU:
				optval = inp->inp_errormtu;
				break;

#define	OPTBIT(bit)	(inp->inp_flags & bit ? 1 : 0)

			case IP_RECVOPTS:
				optval = OPTBIT(INP_RECVOPTS);
				break;

			case IP_RECVRETOPTS:
				optval = OPTBIT(INP_RECVRETOPTS);
				break;

			case IP_RECVDSTADDR:
				optval = OPTBIT(INP_RECVDSTADDR);
				break;

			case IP_RECVIF:
				optval = OPTBIT(INP_RECVIF);
				break;
			}
			*mtod(m, int *) = optval;
			break;

#if 0	/* defined(IPSEC) || defined(FAST_IPSEC) */
		/* XXX: code broken */
		case IP_IPSEC_POLICY:
		{
			caddr_t req = NULL;
			size_t len = 0;

			if (m) {
				req = mtod(m, caddr_t);
				len = m->m_len;
			}
			error = ipsec4_get_policy(inp, req, len, mp);
			break;
		}
#endif /*IPSEC*/

		case IP_MULTICAST_IF:
		case IP_MULTICAST_TTL:
		case IP_MULTICAST_LOOP:
		case IP_ADD_MEMBERSHIP:
		case IP_DROP_MEMBERSHIP:
			error = ip_getmoptions(optname, inp->inp_moptions, mp);
			if (*mp)
				MCLAIM(*mp, so->so_mowner);
			break;

		case IP_PORTRANGE:
			*mp = m = m_get(M_WAIT, MT_SOOPTS);
			MCLAIM(m, so->so_mowner);
			m->m_len = sizeof(int);

			if (inp->inp_flags & INP_LOWPORT)
				optval = IP_PORTRANGE_LOW;
			else
				optval = IP_PORTRANGE_DEFAULT;

			*mtod(m, int *) = optval;
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		break;
	}
	return (error);
}

/*
 * Set up IP options in pcb for insertion in output packets.
 * Store in mbuf with pointer in pcbopt, adding pseudo-option
 * with destination address if source routed.
 */
int
#ifdef notyet
ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m)
#else
ip_pcbopts(struct mbuf **pcbopt, struct mbuf *m)
#endif
{
	int cnt, optlen;
	u_char *cp;
	u_char opt;

	/* turn off any old options */
	if (*pcbopt)
		(void)m_free(*pcbopt);
	*pcbopt = 0;
	if (m == (struct mbuf *)0 || m->m_len == 0) {
		/*
		 * Only turning off any previous options.
		 */
		if (m)
			(void)m_free(m);
		return (0);
	}

#ifndef	__vax__
	if (m->m_len % sizeof(int32_t))
		goto bad;
#endif
	/*
	 * IP first-hop destination address will be stored before
	 * actual options; move other options back
	 * and clear it when none present.
	 */
	if (m->m_data + m->m_len + sizeof(struct in_addr) >= &m->m_dat[MLEN])
		goto bad;
	cnt = m->m_len;
	m->m_len += sizeof(struct in_addr);
	cp = mtod(m, u_char *) + sizeof(struct in_addr);
	memmove(cp, mtod(m, caddr_t), (unsigned)cnt);
	bzero(mtod(m, caddr_t), sizeof(struct in_addr));

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < IPOPT_OLEN + sizeof(*cp))
				goto bad;
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt)
				goto bad;
		}
		switch (opt) {

		default:
			break;

		case IPOPT_LSRR:
		case IPOPT_SSRR:
			/*
			 * user process specifies route as:
			 *	->A->B->C->D
			 * D must be our final destination (but we can't
			 * check that since we may not have connected yet).
			 * A is first hop destination, which doesn't appear in
			 * actual IP option, but is stored before the options.
			 */
			if (optlen < IPOPT_MINOFF - 1 + sizeof(struct in_addr))
				goto bad;
			m->m_len -= sizeof(struct in_addr);
			cnt -= sizeof(struct in_addr);
			optlen -= sizeof(struct in_addr);
			cp[IPOPT_OLEN] = optlen;
			/*
			 * Move first hop before start of options.
			 */
			bcopy((caddr_t)&cp[IPOPT_OFFSET+1], mtod(m, caddr_t),
			    sizeof(struct in_addr));
			/*
			 * Then copy rest of options back
			 * to close up the deleted entry.
			 */
			(void)memmove(&cp[IPOPT_OFFSET+1],
			    &cp[IPOPT_OFFSET+1] + sizeof(struct in_addr),
			    (unsigned)cnt - (IPOPT_MINOFF - 1));
			break;
		}
	}
	if (m->m_len > MAX_IPOPTLEN + sizeof(struct in_addr))
		goto bad;
	*pcbopt = m;
	return (0);

bad:
	(void)m_free(m);
	return (EINVAL);
}

/*
 * Following RFC 1724 section 3.3, 0.0.0.0/8 is interpreted as an
 * interface index.
 */
static struct ifnet *
ip_multicast_if(struct in_addr *a, int *ifindexp)
{
	int ifindex;
	struct ifnet *ifp = NULL;
	struct in_ifaddr *ia;

	if (ifindexp)
		*ifindexp = 0;
	if (ntohl(a->s_addr) >> 24 == 0) {
		ifindex = ntohl(a->s_addr) & 0xffffff;
		if (ifindex < 0 || if_indexlim <= ifindex)
			return NULL;
		ifp = ifindex2ifnet[ifindex];
		if (!ifp)
			return NULL;
		if (ifindexp)
			*ifindexp = ifindex;
	} else {
		LIST_FOREACH(ia, &IN_IFADDR_HASH(a->s_addr), ia_hash) {
			if (in_hosteq(ia->ia_addr.sin_addr, *a) &&
			    (ia->ia_ifp->if_flags & IFF_MULTICAST) != 0) {
				ifp = ia->ia_ifp;
				break;
			}
		}
	}
	return ifp;
}

static int
ip_getoptval(struct mbuf *m, u_int8_t *val, u_int maxval)
{
	u_int tval;

	if (m == NULL)
		return EINVAL;

	switch (m->m_len) {
	case sizeof(u_char):
		tval = *(mtod(m, u_char *));
		break;
	case sizeof(u_int):
		tval = *(mtod(m, u_int *));
		break;
	default:
		return EINVAL;
	}

	if (tval > maxval)
		return EINVAL;

	*val = tval;
	return 0;
}

/*
 * Set the IP multicast options in response to user setsockopt().
 */
int
ip_setmoptions(int optname, struct ip_moptions **imop, struct mbuf *m)
{
	int error = 0;
	int i;
	struct in_addr addr;
	struct ip_mreq *mreq;
	struct ifnet *ifp;
	struct ip_moptions *imo = *imop;
	struct route ro;
	struct sockaddr_in *dst;
	int ifindex;

	if (imo == NULL) {
		/*
		 * No multicast option buffer attached to the pcb;
		 * allocate one and initialize to default values.
		 */
		imo = (struct ip_moptions *)malloc(sizeof(*imo), M_IPMOPTS,
		    M_WAITOK);

		if (imo == NULL)
			return (ENOBUFS);
		*imop = imo;
		imo->imo_multicast_ifp = NULL;
		imo->imo_multicast_addr.s_addr = INADDR_ANY;
		imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
		imo->imo_multicast_loop = IP_DEFAULT_MULTICAST_LOOP;
		imo->imo_num_memberships = 0;
	}

	switch (optname) {

	case IP_MULTICAST_IF:
		/*
		 * Select the interface for outgoing multicast packets.
		 */
		if (m == NULL || m->m_len != sizeof(struct in_addr)) {
			error = EINVAL;
			break;
		}
		addr = *(mtod(m, struct in_addr *));
		/*
		 * INADDR_ANY is used to remove a previous selection.
		 * When no interface is selected, a default one is
		 * chosen every time a multicast packet is sent.
		 */
		if (in_nullhost(addr)) {
			imo->imo_multicast_ifp = NULL;
			break;
		}
		/*
		 * The selected interface is identified by its local
		 * IP address.  Find the interface and confirm that
		 * it supports multicasting.
		 */
		ifp = ip_multicast_if(&addr, &ifindex);
		if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
			error = EADDRNOTAVAIL;
			break;
		}
		imo->imo_multicast_ifp = ifp;
		if (ifindex)
			imo->imo_multicast_addr = addr;
		else
			imo->imo_multicast_addr.s_addr = INADDR_ANY;
		break;

	case IP_MULTICAST_TTL:
		/*
		 * Set the IP time-to-live for outgoing multicast packets.
		 */
		error = ip_getoptval(m, &imo->imo_multicast_ttl, MAXTTL);
		break;

	case IP_MULTICAST_LOOP:
		/*
		 * Set the loopback flag for outgoing multicast packets.
		 * Must be zero or one.
		 */
		error = ip_getoptval(m, &imo->imo_multicast_loop, 1);
		break;

	case IP_ADD_MEMBERSHIP:
		/*
		 * Add a multicast group membership.
		 * Group must be a valid IP multicast address.
		 */
		if (m == NULL || m->m_len != sizeof(struct ip_mreq)) {
			error = EINVAL;
			break;
		}
		mreq = mtod(m, struct ip_mreq *);
		if (!IN_MULTICAST(mreq->imr_multiaddr.s_addr)) {
			error = EINVAL;
			break;
		}
		/*
		 * If no interface address was provided, use the interface of
		 * the route to the given multicast address.
		 */
		if (in_nullhost(mreq->imr_interface)) {
			bzero((caddr_t)&ro, sizeof(ro));
			ro.ro_rt = NULL;
			dst = satosin(&ro.ro_dst);
			dst->sin_len = sizeof(*dst);
			dst->sin_family = AF_INET;
			dst->sin_addr = mreq->imr_multiaddr;
			rtalloc(&ro);
			if (ro.ro_rt == NULL) {
				error = EADDRNOTAVAIL;
				break;
			}
			ifp = ro.ro_rt->rt_ifp;
			rtfree(ro.ro_rt);
		} else {
			ifp = ip_multicast_if(&mreq->imr_interface, NULL);
		}
		/*
		 * See if we found an interface, and confirm that it
		 * supports multicast.
		 */
		if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
			error = EADDRNOTAVAIL;
			break;
		}
		/*
		 * See if the membership already exists or if all the
		 * membership slots are full.
		 */
		for (i = 0; i < imo->imo_num_memberships; ++i) {
			if (imo->imo_membership[i]->inm_ifp == ifp &&
			    in_hosteq(imo->imo_membership[i]->inm_addr,
				      mreq->imr_multiaddr))
				break;
		}
		if (i < imo->imo_num_memberships) {
			error = EADDRINUSE;
			break;
		}
		if (i == IP_MAX_MEMBERSHIPS) {
			error = ETOOMANYREFS;
			break;
		}
		/*
		 * Everything looks good; add a new record to the multicast
		 * address list for the given interface.
		 */
		if ((imo->imo_membership[i] =
		    in_addmulti(&mreq->imr_multiaddr, ifp)) == NULL) {
			error = ENOBUFS;
			break;
		}
		++imo->imo_num_memberships;
		break;

	case IP_DROP_MEMBERSHIP:
		/*
		 * Drop a multicast group membership.
		 * Group must be a valid IP multicast address.
		 */
		if (m == NULL || m->m_len != sizeof(struct ip_mreq)) {
			error = EINVAL;
			break;
		}
		mreq = mtod(m, struct ip_mreq *);
		if (!IN_MULTICAST(mreq->imr_multiaddr.s_addr)) {
			error = EINVAL;
			break;
		}
		/*
		 * If an interface address was specified, get a pointer
		 * to its ifnet structure.
		 */
		if (in_nullhost(mreq->imr_interface))
			ifp = NULL;
		else {
			ifp = ip_multicast_if(&mreq->imr_interface, NULL);
			if (ifp == NULL) {
				error = EADDRNOTAVAIL;
				break;
			}
		}
		/*
		 * Find the membership in the membership array.
		 */
		for (i = 0; i < imo->imo_num_memberships; ++i) {
			if ((ifp == NULL ||
			    imo->imo_membership[i]->inm_ifp == ifp) &&
			    in_hosteq(imo->imo_membership[i]->inm_addr,
				      mreq->imr_multiaddr))
				break;
		}
		if (i == imo->imo_num_memberships) {
			error = EADDRNOTAVAIL;
			break;
		}
		/*
		 * Give up the multicast address record to which the
		 * membership points.
		 */
		in_delmulti(imo->imo_membership[i]);
		/*
		 * Remove the gap in the membership array.
		 */
		for (++i; i < imo->imo_num_memberships; ++i)
			imo->imo_membership[i-1] = imo->imo_membership[i];
		--imo->imo_num_memberships;
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	/*
	 * If all options have default values, no need to keep the mbuf.
	 */
	if (imo->imo_multicast_ifp == NULL &&
	    imo->imo_multicast_ttl == IP_DEFAULT_MULTICAST_TTL &&
	    imo->imo_multicast_loop == IP_DEFAULT_MULTICAST_LOOP &&
	    imo->imo_num_memberships == 0) {
		free(*imop, M_IPMOPTS);
		*imop = NULL;
	}

	return (error);
}

/*
 * Return the IP multicast options in response to user getsockopt().
 */
int
ip_getmoptions(int optname, struct ip_moptions *imo, struct mbuf **mp)
{
	u_char *ttl;
	u_char *loop;
	struct in_addr *addr;
	struct in_ifaddr *ia;

	*mp = m_get(M_WAIT, MT_SOOPTS);

	switch (optname) {

	case IP_MULTICAST_IF:
		addr = mtod(*mp, struct in_addr *);
		(*mp)->m_len = sizeof(struct in_addr);
		if (imo == NULL || imo->imo_multicast_ifp == NULL)
			*addr = zeroin_addr;
		else if (imo->imo_multicast_addr.s_addr) {
			/* return the value user has set */
			*addr = imo->imo_multicast_addr;
		} else {
			IFP_TO_IA(imo->imo_multicast_ifp, ia);
			*addr = ia ? ia->ia_addr.sin_addr : zeroin_addr;
		}
		return (0);

	case IP_MULTICAST_TTL:
		ttl = mtod(*mp, u_char *);
		(*mp)->m_len = 1;
		*ttl = imo ? imo->imo_multicast_ttl
		    : IP_DEFAULT_MULTICAST_TTL;
		return (0);

	case IP_MULTICAST_LOOP:
		loop = mtod(*mp, u_char *);
		(*mp)->m_len = 1;
		*loop = imo ? imo->imo_multicast_loop
		    : IP_DEFAULT_MULTICAST_LOOP;
		return (0);

	default:
		return (EOPNOTSUPP);
	}
}

/*
 * Discard the IP multicast options.
 */
void
ip_freemoptions(struct ip_moptions *imo)
{
	int i;

	if (imo != NULL) {
		for (i = 0; i < imo->imo_num_memberships; ++i)
			in_delmulti(imo->imo_membership[i]);
		free(imo, M_IPMOPTS);
	}
}

/*
 * Routine called from ip_output() to loop back a copy of an IP multicast
 * packet to the input queue of a specified interface.  Note that this
 * calls the output routine of the loopback "driver", but with an interface
 * pointer that might NOT be lo0ifp -- easier than replicating that code here.
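 *
 * (Any delayed transport checksum is completed and the IP header checksum
 * recomputed on the copy before it is handed to looutput(), since the
 * looped-back packet will be verified by ip_input().)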
 */
static void
ip_mloopback(struct ifnet *ifp, struct mbuf *m, struct sockaddr_in *dst)
{
	struct ip *ip;
	struct mbuf *copym;

	copym = m_copy(m, 0, M_COPYALL);
	if (copym != NULL
	    && (copym->m_flags & M_EXT || copym->m_len < sizeof(struct ip)))
		copym = m_pullup(copym, sizeof(struct ip));
	if (copym != NULL) {
		/*
		 * We don't bother to fragment if the IP length is greater
		 * than the interface's MTU.  Can this possibly matter?
		 */
		ip = mtod(copym, struct ip *);

		if (copym->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_delayed_cksum(copym);
			copym->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}

		ip->ip_sum = 0;
		ip->ip_sum = in_cksum(copym, ip->ip_hl << 2);
		(void) looutput(ifp, copym, sintosa(dst), NULL);
	}
}