xref: /openbsd-src/sys/netinet/ip_input.c (revision f763167468dba5339ed4b14b7ecaca2a397ab0f6)
1 /*	$OpenBSD: ip_input.c,v 1.322 2017/09/07 10:54:49 bluhm Exp $	*/
2 /*	$NetBSD: ip_input.c,v 1.30 1996/03/16 23:53:58 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1988, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
33  */
34 
35 #include "pf.h"
36 #include "carp.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/domain.h>
42 #include <sys/mutex.h>
43 #include <sys/protosw.h>
44 #include <sys/socket.h>
45 #include <sys/socketvar.h>
46 #include <sys/sysctl.h>
47 #include <sys/pool.h>
48 #include <sys/task.h>
49 
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_dl.h>
53 #include <net/route.h>
54 #include <net/netisr.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/if_ether.h>
59 #include <netinet/ip.h>
60 #include <netinet/in_pcb.h>
61 #include <netinet/in_var.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/ip_icmp.h>
64 
65 #ifdef INET6
66 #include <netinet6/ip6protosw.h>
67 #include <netinet6/ip6_var.h>
68 #endif
69 
70 #if NPF > 0
71 #include <net/pfvar.h>
72 #endif
73 
74 #ifdef MROUTING
75 #include <netinet/ip_mroute.h>
76 #endif
77 
78 #ifdef IPSEC
79 #include <netinet/ip_ipsp.h>
80 #endif /* IPSEC */
81 
82 #if NCARP > 0
83 #include <net/if_types.h>
84 #include <netinet/ip_carp.h>
85 #endif
86 
87 int encdebug = 0;
88 int ipsec_keep_invalid = IPSEC_DEFAULT_EMBRYONIC_SA_TIMEOUT;
89 int ipsec_require_pfs = IPSEC_DEFAULT_PFS;
90 int ipsec_soft_allocations = IPSEC_DEFAULT_SOFT_ALLOCATIONS;
91 int ipsec_exp_allocations = IPSEC_DEFAULT_EXP_ALLOCATIONS;
92 int ipsec_soft_bytes = IPSEC_DEFAULT_SOFT_BYTES;
93 int ipsec_exp_bytes = IPSEC_DEFAULT_EXP_BYTES;
94 int ipsec_soft_timeout = IPSEC_DEFAULT_SOFT_TIMEOUT;
95 int ipsec_exp_timeout = IPSEC_DEFAULT_EXP_TIMEOUT;
96 int ipsec_soft_first_use = IPSEC_DEFAULT_SOFT_FIRST_USE;
97 int ipsec_exp_first_use = IPSEC_DEFAULT_EXP_FIRST_USE;
98 int ipsec_expire_acquire = IPSEC_DEFAULT_EXPIRE_ACQUIRE;
99 char ipsec_def_enc[20];
100 char ipsec_def_auth[20];
101 char ipsec_def_comp[20];
102 
103 /* values controllable via sysctl */
104 int	ipforwarding = 0;
105 int	ipmforwarding = 0;
106 int	ipmultipath = 0;
107 int	ipsendredirects = 1;
108 int	ip_dosourceroute = 0;
109 int	ip_defttl = IPDEFTTL;
110 int	ip_mtudisc = 1;
111 u_int	ip_mtudisc_timeout = IPMTUDISCTIMEOUT;
112 int	ip_directedbcast = 0;
113 
114 struct rttimer_queue *ip_mtudisc_timeout_q = NULL;
115 
116 /* Protects `ipq' and `ip_frags'. */
117 struct mutex	ipq_mutex = MUTEX_INITIALIZER(IPL_SOFTNET);
118 
119 /* IP reassembly queue */
120 LIST_HEAD(, ipq) ipq;
121 
122 /* Keep track of memory used for reassembly */
123 int	ip_maxqueue = 300;
124 int	ip_frags = 0;
125 
126 int *ipctl_vars[IPCTL_MAXID] = IPCTL_VARS;
127 
128 struct niqueue ipintrq = NIQUEUE_INITIALIZER(IPQ_MAXLEN, NETISR_IP);
129 
130 struct pool ipqent_pool;
131 struct pool ipq_pool;
132 
133 struct cpumem *ipcounters;
134 
135 int ip_sysctl_ipstat(void *, size_t *, void *);
136 
137 static struct mbuf_queue	ipsend_mq;
138 
139 int	ip_ours(struct mbuf **, int *, int, int);
140 int	ip_local(struct mbuf **, int *, int, int);
141 int	ip_dooptions(struct mbuf *, struct ifnet *);
142 int	in_ouraddr(struct mbuf *, struct ifnet *, struct rtentry **);
143 
144 static void ip_send_dispatch(void *);
145 static struct task ipsend_task = TASK_INITIALIZER(ip_send_dispatch, &ipsend_mq);
146 /*
147  * Used to save the IP options in case a protocol wants to respond
148  * to an incoming packet over the same route if the packet got here
149  * using IP source routing.  This allows connection establishment and
150  * maintenance when the remote end is on a network that is not known
151  * to us.
152  */
153 struct ip_srcrt {
154 	int		isr_nhops;		   /* number of hops */
155 	struct in_addr	isr_dst;		   /* final destination */
156 	char		isr_nop;		   /* one NOP to align */
157 	char		isr_hdr[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN & OFFSET */
158 	struct in_addr	isr_routes[MAX_IPOPTLEN/sizeof(struct in_addr)];
159 };
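
/*
 * A worked example of how save_rte() below fills this structure,
 * assuming the usual <netinet/ip.h> values (IPOPT_OFFSET == 2): for a
 * recorded source-route option of length 11 (3 header bytes plus two
 * 4-byte addresses), the 3 header bytes are copied into isr_hdr[], the
 * two addresses spill over into isr_routes[], and
 *	isr_nhops = (11 - IPOPT_OFFSET - 1) / sizeof(struct in_addr) = 2.
 */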
160 
161 void save_rte(struct mbuf *, u_char *, struct in_addr);
162 
163 /*
164  * IP initialization: fill in IP protocol switch table.
165  * All protocols not implemented in kernel go to raw IP protocol handler.
166  */
167 void
168 ip_init(void)
169 {
170 	struct protosw *pr;
171 	int i;
172 	const u_int16_t defbaddynamicports_tcp[] = DEFBADDYNAMICPORTS_TCP;
173 	const u_int16_t defbaddynamicports_udp[] = DEFBADDYNAMICPORTS_UDP;
174 	const u_int16_t defrootonlyports_tcp[] = DEFROOTONLYPORTS_TCP;
175 	const u_int16_t defrootonlyports_udp[] = DEFROOTONLYPORTS_UDP;
176 
177 	ipcounters = counters_alloc(ips_ncounters);
178 
179 	pool_init(&ipqent_pool, sizeof(struct ipqent), 0,
180 	    IPL_SOFTNET, 0, "ipqe",  NULL);
181 	pool_init(&ipq_pool, sizeof(struct ipq), 0,
182 	    IPL_SOFTNET, 0, "ipq", NULL);
183 
184 	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
185 	if (pr == NULL)
186 		panic("ip_init");
187 	for (i = 0; i < IPPROTO_MAX; i++)
188 		ip_protox[i] = pr - inetsw;
189 	for (pr = inetdomain.dom_protosw;
190 	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
191 		if (pr->pr_domain->dom_family == PF_INET &&
192 		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW &&
193 		    pr->pr_protocol < IPPROTO_MAX)
194 			ip_protox[pr->pr_protocol] = pr - inetsw;
195 	LIST_INIT(&ipq);
196 	if (ip_mtudisc != 0)
197 		ip_mtudisc_timeout_q =
198 		    rt_timer_queue_create(ip_mtudisc_timeout);
199 
200 	/* Fill in list of ports not to allocate dynamically. */
201 	memset(&baddynamicports, 0, sizeof(baddynamicports));
202 	for (i = 0; defbaddynamicports_tcp[i] != 0; i++)
203 		DP_SET(baddynamicports.tcp, defbaddynamicports_tcp[i]);
204 	for (i = 0; defbaddynamicports_udp[i] != 0; i++)
205 		DP_SET(baddynamicports.udp, defbaddynamicports_udp[i]);
206 
207 	/* Fill in list of ports only root can bind to. */
208 	memset(&rootonlyports, 0, sizeof(rootonlyports));
209 	for (i = 0; defrootonlyports_tcp[i] != 0; i++)
210 		DP_SET(rootonlyports.tcp, defrootonlyports_tcp[i]);
211 	for (i = 0; defrootonlyports_udp[i] != 0; i++)
212 		DP_SET(rootonlyports.udp, defrootonlyports_udp[i]);
213 
214 	strlcpy(ipsec_def_enc, IPSEC_DEFAULT_DEF_ENC, sizeof(ipsec_def_enc));
215 	strlcpy(ipsec_def_auth, IPSEC_DEFAULT_DEF_AUTH, sizeof(ipsec_def_auth));
216 	strlcpy(ipsec_def_comp, IPSEC_DEFAULT_DEF_COMP, sizeof(ipsec_def_comp));
217 
218 	mq_init(&ipsend_mq, 64, IPL_SOFTNET);
219 }
220 
221 /*
222  * Enqueue packet for local delivery.  Queuing is used as a boundary
223  * between the network layer (input/forward path) running without
224  * KERNEL_LOCK() and the transport layer still needing it.
225  */
226 int
227 ip_ours(struct mbuf **mp, int *offp, int nxt, int af)
228 {
229 	/* We are already in an IPv4/IPv6 local delivery loop. */
230 	if (af != AF_UNSPEC)
231 		return ip_local(mp, offp, nxt, af);
232 
233 	niq_enqueue(&ipintrq, *mp);
234 	*mp = NULL;
235 	return IPPROTO_DONE;
236 }
237 
238 /*
239  * Dequeue and process locally delivered packets.
240  */
241 void
242 ipintr(void)
243 {
244 	struct mbuf *m;
245 	int off, nxt;
246 
247 	while ((m = niq_dequeue(&ipintrq)) != NULL) {
248 #ifdef DIAGNOSTIC
249 		if ((m->m_flags & M_PKTHDR) == 0)
250 			panic("ipintr no HDR");
251 #endif
252 		off = 0;
253 		nxt = ip_local(&m, &off, IPPROTO_IPV4, AF_UNSPEC);
254 		KASSERT(nxt == IPPROTO_DONE);
255 	}
256 }
257 
258 /*
259  * IPv4 input routine.
260  *
261  * Checksum and byte swap header.  Process options. Forward or deliver.
262  */
263 void
264 ipv4_input(struct ifnet *ifp, struct mbuf *m)
265 {
266 	int off, nxt;
267 
268 	off = 0;
269 	nxt = ip_input_if(&m, &off, IPPROTO_IPV4, AF_UNSPEC, ifp);
270 	KASSERT(nxt == IPPROTO_DONE);
271 }
272 
273 int
274 ip_input_if(struct mbuf **mp, int *offp, int nxt, int af, struct ifnet *ifp)
275 {
276 	struct mbuf	*m = *mp;
277 	struct rtentry	*rt = NULL;
278 	struct ip	*ip;
279 	int hlen, len;
280 	in_addr_t pfrdr = 0;
281 
282 	KASSERT(*offp == 0);
283 
284 	ipstat_inc(ips_total);
285 	if (m->m_len < sizeof (struct ip) &&
286 	    (m = *mp = m_pullup(m, sizeof (struct ip))) == NULL) {
287 		ipstat_inc(ips_toosmall);
288 		goto bad;
289 	}
290 	ip = mtod(m, struct ip *);
291 	if (ip->ip_v != IPVERSION) {
292 		ipstat_inc(ips_badvers);
293 		goto bad;
294 	}
295 	hlen = ip->ip_hl << 2;
296 	if (hlen < sizeof(struct ip)) {	/* minimum header length */
297 		ipstat_inc(ips_badhlen);
298 		goto bad;
299 	}
300 	if (hlen > m->m_len) {
301 		if ((m = *mp = m_pullup(m, hlen)) == NULL) {
302 			ipstat_inc(ips_badhlen);
303 			goto bad;
304 		}
305 		ip = mtod(m, struct ip *);
306 	}
307 
308 	/* 127/8 must not appear on wire - RFC1122 */
309 	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
310 	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
311 		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
312 			ipstat_inc(ips_badaddr);
313 			goto bad;
314 		}
315 	}
316 
317 	if ((m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_OK) == 0) {
318 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_BAD) {
319 			ipstat_inc(ips_badsum);
320 			goto bad;
321 		}
322 
323 		ipstat_inc(ips_inswcsum);
324 		if (in_cksum(m, hlen) != 0) {
325 			ipstat_inc(ips_badsum);
326 			goto bad;
327 		}
328 	}
329 
330 	/* Retrieve the packet length. */
331 	len = ntohs(ip->ip_len);
332 
333 	/*
334 	 * Convert fields to host representation.
335 	 */
336 	if (len < hlen) {
337 		ipstat_inc(ips_badlen);
338 		goto bad;
339 	}
340 
341 	/*
342 	 * Check that the amount of data in the buffers
343 	 * is at least as much as the IP header would have us expect.
344 	 * Trim mbufs if longer than we expect.
345 	 * Drop packet if shorter than we expect.
346 	 */
347 	if (m->m_pkthdr.len < len) {
348 		ipstat_inc(ips_tooshort);
349 		goto bad;
350 	}
351 	if (m->m_pkthdr.len > len) {
352 		if (m->m_len == m->m_pkthdr.len) {
353 			m->m_len = len;
354 			m->m_pkthdr.len = len;
355 		} else
356 			m_adj(m, len - m->m_pkthdr.len);
357 	}
358 
359 #if NCARP > 0
360 	if (ifp->if_type == IFT_CARP &&
361 	    carp_lsdrop(m, AF_INET, &ip->ip_src.s_addr, &ip->ip_dst.s_addr,
362 	    (ip->ip_p == IPPROTO_ICMP ? 0 : 1)))
363 		goto bad;
364 #endif
365 
366 #if NPF > 0
367 	/*
368 	 * Packet filter
369 	 */
370 	pfrdr = ip->ip_dst.s_addr;
371 	if (pf_test(AF_INET, PF_IN, ifp, mp) != PF_PASS)
372 		goto bad;
373 	m = *mp;
374 	if (m == NULL)
375 		goto bad;
376 
377 	ip = mtod(m, struct ip *);
378 	hlen = ip->ip_hl << 2;
379 	pfrdr = (pfrdr != ip->ip_dst.s_addr);
380 #endif
381 
382 	/*
383 	 * Process options and, if not destined for us,
384 	 * ship it on.  ip_dooptions returns 1 when an
385 	 * error was detected (causing an icmp message
386 	 * to be sent and the original packet to be freed).
387 	 */
388 	if (hlen > sizeof (struct ip) && ip_dooptions(m, ifp)) {
389 		m = *mp = NULL;
390 		goto bad;
391 	}
392 
393 	if (ip->ip_dst.s_addr == INADDR_BROADCAST ||
394 	    ip->ip_dst.s_addr == INADDR_ANY) {
395 		nxt = ip_ours(mp, offp, nxt, af);
396 		goto out;
397 	}
398 
399 	if (in_ouraddr(m, ifp, &rt)) {
400 		nxt = ip_ours(mp, offp, nxt, af);
401 		goto out;
402 	}
403 
404 	if (IN_MULTICAST(ip->ip_dst.s_addr)) {
405 		/*
406 		 * Make sure M_MCAST is set.  It should theoretically
407 		 * already be there, but let's play safe because upper
408 		 * layers check for this flag.
409 		 */
410 		m->m_flags |= M_MCAST;
411 
412 #ifdef MROUTING
413 		if (ipmforwarding && ip_mrouter[ifp->if_rdomain]) {
414 			int error;
415 
416 			if (m->m_flags & M_EXT) {
417 				if ((m = *mp = m_pullup(m, hlen)) == NULL) {
418 					ipstat_inc(ips_toosmall);
419 					goto bad;
420 				}
421 				ip = mtod(m, struct ip *);
422 			}
423 			/*
424 			 * If we are acting as a multicast router, all
425 			 * incoming multicast packets are passed to the
426 			 * kernel-level multicast forwarding function.
427 			 * The packet is returned (relatively) intact; if
428 			 * ip_mforward() returns a non-zero value, the packet
429 			 * must be discarded, else it may be accepted below.
430 			 *
431 			 * (The IP ident field is put in the same byte order
432 			 * as expected when ip_mforward() is called from
433 			 * ip_output().)
434 			 */
435 			KERNEL_LOCK();
436 			error = ip_mforward(m, ifp);
437 			KERNEL_UNLOCK();
438 			if (error) {
439 				ipstat_inc(ips_cantforward);
440 				goto bad;
441 			}
442 
443 			/*
444 			 * The process-level routing daemon needs to receive
445 			 * all multicast IGMP packets, whether or not this
446 			 * host belongs to their destination groups.
447 			 */
448 			if (ip->ip_p == IPPROTO_IGMP) {
449 				nxt = ip_ours(mp, offp, nxt, af);
450 				goto out;
451 			}
452 			ipstat_inc(ips_forward);
453 		}
454 #endif
455 		/*
456 		 * See if we belong to the destination multicast group on the
457 		 * arrival interface.
458 		 */
459 		if (!in_hasmulti(&ip->ip_dst, ifp)) {
460 			ipstat_inc(ips_notmember);
461 			if (!IN_LOCAL_GROUP(ip->ip_dst.s_addr))
462 				ipstat_inc(ips_cantforward);
463 			goto bad;
464 		}
465 		nxt = ip_ours(mp, offp, nxt, af);
466 		goto out;
467 	}
468 
469 #if NCARP > 0
470 	if (ifp->if_type == IFT_CARP && ip->ip_p == IPPROTO_ICMP &&
471 	    carp_lsdrop(m, AF_INET, &ip->ip_src.s_addr, &ip->ip_dst.s_addr, 1))
472 		goto bad;
473 #endif
474 	/*
475 	 * Not for us; forward if possible and desirable.
476 	 */
477 	if (ipforwarding == 0) {
478 		ipstat_inc(ips_cantforward);
479 		goto bad;
480 	}
481 #ifdef IPSEC
482 	if (ipsec_in_use) {
483 		int rv;
484 
485 		KERNEL_ASSERT_LOCKED();
486 
487 		rv = ipsec_forward_check(m, hlen, AF_INET);
488 		if (rv != 0) {
489 			ipstat_inc(ips_cantforward);
490 			goto bad;
491 		}
492 		/*
493 		 * Fall through, forward packet. Outbound IPsec policy
494 		 * checking will occur in ip_output().
495 		 */
496 	}
497 #endif /* IPSEC */
498 
499 	ip_forward(m, ifp, rt, pfrdr);
500 	*mp = NULL;
501 	return IPPROTO_DONE;
502  bad:
503 	nxt = IPPROTO_DONE;
504 	m_freemp(mp);
505  out:
506 	rtfree(rt);
507 	return nxt;
508 }
509 
510 /*
511  * IPv4 local-delivery routine.
512  *
513  * If fragmented try to reassemble.  Pass to next level.
514  */
515 int
516 ip_local(struct mbuf **mp, int *offp, int nxt, int af)
517 {
518 	struct mbuf *m = *mp;
519 	struct ip *ip = mtod(m, struct ip *);
520 	struct ipq *fp;
521 	struct ipqent *ipqe;
522 	int mff, hlen;
523 
524 	hlen = ip->ip_hl << 2;
525 
526 	/*
527 	 * If offset or IP_MF are set, must reassemble.
528 	 * Otherwise, nothing need be done.
529 	 * (We could look in the reassembly queue to see
530 	 * if the packet was previously fragmented,
531 	 * but it's not worth the time; just let them time out.)
532 	 */
533 	if (ip->ip_off &~ htons(IP_DF | IP_RF)) {
534 		if (m->m_flags & M_EXT) {		/* XXX */
535 			if ((m = *mp = m_pullup(m, hlen)) == NULL) {
536 				ipstat_inc(ips_toosmall);
537 				return IPPROTO_DONE;
538 			}
539 			ip = mtod(m, struct ip *);
540 		}
541 
542 		mtx_enter(&ipq_mutex);
543 
544 		/*
545 		 * Look for queue of fragments
546 		 * of this datagram.
547 		 */
548 		LIST_FOREACH(fp, &ipq, ipq_q) {
549 			if (ip->ip_id == fp->ipq_id &&
550 			    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
551 			    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
552 			    ip->ip_p == fp->ipq_p)
553 				break;
554 		}
555 
556 		/*
557 		 * Adjust ip_len to not reflect header,
558 		 * set ipqe_mff if more fragments are expected,
559 		 * convert offset of this to bytes.
560 		 */
561 		ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
562 		mff = (ip->ip_off & htons(IP_MF)) != 0;
563 		if (mff) {
564 			/*
565 			 * Make sure that fragments have a data length
566 			 * that's a non-zero multiple of 8 bytes.
567 			 */
568 			if (ntohs(ip->ip_len) == 0 ||
569 			    (ntohs(ip->ip_len) & 0x7) != 0) {
570 				ipstat_inc(ips_badfrags);
571 				goto bad;
572 			}
573 		}
574 		ip->ip_off = htons(ntohs(ip->ip_off) << 3);
575 
576 		/*
577 		 * If datagram marked as having more fragments
578 		 * or if this is not the first fragment,
579 		 * attempt reassembly; if it succeeds, proceed.
580 		 */
581 		if (mff || ip->ip_off) {
582 			ipstat_inc(ips_fragments);
583 			if (ip_frags + 1 > ip_maxqueue) {
584 				ip_flush();
585 				ipstat_inc(ips_rcvmemdrop);
586 				goto bad;
587 			}
588 
589 			ipqe = pool_get(&ipqent_pool, PR_NOWAIT);
590 			if (ipqe == NULL) {
591 				ipstat_inc(ips_rcvmemdrop);
592 				goto bad;
593 			}
594 			ip_frags++;
595 			ipqe->ipqe_mff = mff;
596 			ipqe->ipqe_m = m;
597 			ipqe->ipqe_ip = ip;
598 			m = *mp = ip_reass(ipqe, fp);
599 			if (m == NULL)
600 				goto bad;
601 			ipstat_inc(ips_reassembled);
602 			ip = mtod(m, struct ip *);
603 			hlen = ip->ip_hl << 2;
604 			ip->ip_len = htons(ntohs(ip->ip_len) + hlen);
605 		} else
606 			if (fp)
607 				ip_freef(fp);
608 
609 		mtx_leave(&ipq_mutex);
610 	}
611 
612 	*offp = hlen;
613 	nxt = ip->ip_p;
614 	/* Check whether we are already in an IPv4/IPv6 local delivery loop. */
615 	if (af == AF_UNSPEC)
616 		nxt = ip_deliver(mp, offp, nxt, AF_INET);
617 	return nxt;
618  bad:
619 	mtx_leave(&ipq_mutex);
620 	m_freemp(mp);
621 	return IPPROTO_DONE;
622 }
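
/*
 * A worked example of the fragment handling above: a fragment whose
 * flags/offset field reads 0x20b9 in host byte order has IP_MF (0x2000)
 * set and a fragment offset of 0xb9 = 185 eight-byte units.  After mff
 * is latched, htons(ntohs(ip->ip_off) << 3) shifts the flag bits out of
 * the 16-bit field and leaves the byte offset 185 * 8 = 1480 that
 * ip_reass() works with.
 */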
623 
624 #ifndef INET6
625 #define IPSTAT_INC(name)	ipstat_inc(ips_##name)
626 #else
627 #define IPSTAT_INC(name)	(af == AF_INET ?	\
628     ipstat_inc(ips_##name) : ip6stat_inc(ip6s_##name))
629 #endif
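
/*
 * A sketch of how this macro reads once INET6 is configured:
 * IPSTAT_INC(tooshort) expands to
 *	(af == AF_INET ? ipstat_inc(ips_tooshort) : ip6stat_inc(ip6s_tooshort))
 * so ip_deliver() below charges each drop to the address family it is
 * currently processing.
 */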
630 
631 int
632 ip_deliver(struct mbuf **mp, int *offp, int nxt, int af)
633 {
634 	struct protosw *psw;
635 	int naf = af;
636 #ifdef INET6
637 	int nest = 0;
638 #endif /* INET6 */
639 
640 	KERNEL_ASSERT_LOCKED();
641 
642 	/* pf might have modified the packet, so we may have to checksum again */
643 	switch (af) {
644 	case AF_INET:
645 		in_proto_cksum_out(*mp, NULL);
646 		break;
647 #ifdef INET6
648 	case AF_INET6:
649 		in6_proto_cksum_out(*mp, NULL);
650 		break;
651 #endif /* INET6 */
652 	}
653 
654 	/*
655 	 * Tell launch routine the next header
656 	 */
657 	IPSTAT_INC(delivered);
658 
659 	while (nxt != IPPROTO_DONE) {
660 #ifdef INET6
661 		if (af == AF_INET6 &&
662 		    ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
663 			ip6stat_inc(ip6s_toomanyhdr);
664 			goto bad;
665 		}
666 #endif /* INET6 */
667 
668 		/*
669 		 * Protection against faulty packets: there should be
670 		 * more sanity checks in header chain processing.
671 		 */
672 		if ((*mp)->m_pkthdr.len < *offp) {
673 			IPSTAT_INC(tooshort);
674 			goto bad;
675 		}
676 
677 #ifdef INET6
678 		/* draft-itojun-ipv6-tcp-to-anycast */
679 		if (af == AF_INET6 &&
680 		    ISSET((*mp)->m_flags, M_ACAST) && (nxt == IPPROTO_TCP)) {
681 			if ((*mp)->m_len >= sizeof(struct ip6_hdr)) {
682 				icmp6_error(*mp, ICMP6_DST_UNREACH,
683 					ICMP6_DST_UNREACH_ADDR,
684 					offsetof(struct ip6_hdr, ip6_dst));
685 				*mp = NULL;
686 			}
687 			goto bad;
688 		}
689 #endif /* INET6 */
690 
691 #ifdef IPSEC
692 		if (ipsec_in_use) {
693 			if (ipsec_local_check(*mp, *offp, nxt, af) != 0) {
694 				IPSTAT_INC(cantforward);
695 				goto bad;
696 			}
697 		}
698 		/* Otherwise, just fall through and deliver the packet */
699 #endif /* IPSEC */
700 
701 		switch (nxt) {
702 		case IPPROTO_IPV4:
703 			naf = AF_INET;
704 			ipstat_inc(ips_delivered);
705 			break;
706 #ifdef INET6
707 		case IPPROTO_IPV6:
708 			naf = AF_INET6;
709 			ip6stat_inc(ip6s_delivered);
710 			break;
711 #endif /* INET6 */
712 		}
713 		switch (af) {
714 		case AF_INET:
715 			psw = &inetsw[ip_protox[nxt]];
716 			break;
717 #ifdef INET6
718 		case AF_INET6:
719 			psw = &inet6sw[ip6_protox[nxt]];
720 			break;
721 #endif /* INET6 */
722 		}
723 		nxt = (*psw->pr_input)(mp, offp, nxt, af);
724 		af = naf;
725 	}
726 	return nxt;
727  bad:
728 	m_freemp(mp);
729 	return IPPROTO_DONE;
730 }
731 #undef IPSTAT_INC
732 
733 int
734 in_ouraddr(struct mbuf *m, struct ifnet *ifp, struct rtentry **prt)
735 {
736 	struct rtentry		*rt;
737 	struct ip		*ip;
738 	struct sockaddr_in	 sin;
739 	int			 match = 0;
740 
741 #if NPF > 0
742 	switch (pf_ouraddr(m)) {
743 	case 0:
744 		return (0);
745 	case 1:
746 		return (1);
747 	default:
748 		/* pf does not know it */
749 		break;
750 	}
751 #endif
752 
753 	ip = mtod(m, struct ip *);
754 
755 	memset(&sin, 0, sizeof(sin));
756 	sin.sin_len = sizeof(sin);
757 	sin.sin_family = AF_INET;
758 	sin.sin_addr = ip->ip_dst;
759 	rt = rtalloc_mpath(sintosa(&sin), &ip->ip_src.s_addr,
760 	    m->m_pkthdr.ph_rtableid);
761 	if (rtisvalid(rt)) {
762 		if (ISSET(rt->rt_flags, RTF_LOCAL))
763 			match = 1;
764 
765 		/*
766 		 * If directedbcast is enabled we only consider it local
767 		 * if it is received on the interface with that address.
768 		 */
769 		if (ISSET(rt->rt_flags, RTF_BROADCAST) &&
770 		    (!ip_directedbcast || rt->rt_ifidx == ifp->if_index)) {
771 			match = 1;
772 
773 			/* Make sure M_BCAST is set */
774 			m->m_flags |= M_BCAST;
775 		}
776 	}
777 	*prt = rt;
778 
779 	if (!match) {
780 		struct ifaddr *ifa;
781 
782 		/*
783 		 * No local address or broadcast address found, so check for
784 		 * ancient classful broadcast addresses.
785 		 * It must have been broadcast on the link layer, and for an
786 		 * address on the interface it was received on.
787 		 */
788 		if (!ISSET(m->m_flags, M_BCAST) ||
789 		    !IN_CLASSFULBROADCAST(ip->ip_dst.s_addr, ip->ip_dst.s_addr))
790 			return (0);
791 
792 		if (ifp->if_rdomain != rtable_l2(m->m_pkthdr.ph_rtableid))
793 			return (0);
794 		/*
795 		 * The check in the loop assumes a packet is only received on an
796 		 * interface that is UP, and that M_BCAST will only be set on a
797 		 * BROADCAST interface.
798 		 */
799 		NET_ASSERT_LOCKED();
800 		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
801 			if (ifa->ifa_addr->sa_family != AF_INET)
802 				continue;
803 
804 			if (IN_CLASSFULBROADCAST(ip->ip_dst.s_addr,
805 			    ifatoia(ifa)->ia_addr.sin_addr.s_addr)) {
806 				match = 1;
807 				break;
808 			}
809 		}
810 	}
811 
812 	return (match);
813 }
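
/*
 * A concrete example of the classful-broadcast fallback above, assuming
 * IN_CLASSFULBROADCAST() matches the all-ones host part of the classful
 * (here class C) network: a packet for 192.0.2.255 that arrived as a
 * link-layer broadcast (M_BCAST) on an interface configured with
 * 192.0.2.1 in the same rdomain is accepted as ours, even though no
 * RTF_LOCAL or RTF_BROADCAST route matched it.
 */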
814 
815 /*
816  * Take incoming datagram fragment and try to
817  * reassemble it into whole datagram.  If a chain for
818  * reassembly of this datagram already exists, then it
819  * is given as fp; otherwise have to make a chain.
820  */
821 struct mbuf *
822 ip_reass(struct ipqent *ipqe, struct ipq *fp)
823 {
824 	struct mbuf *m = ipqe->ipqe_m;
825 	struct ipqent *nq, *p, *q;
826 	struct ip *ip;
827 	struct mbuf *t;
828 	int hlen = ipqe->ipqe_ip->ip_hl << 2;
829 	int i, next;
830 	u_int8_t ecn, ecn0;
831 
832 	MUTEX_ASSERT_LOCKED(&ipq_mutex);
833 
834 	/*
835 	 * Presence of header sizes in mbufs
836 	 * would confuse code below.
837 	 */
838 	m->m_data += hlen;
839 	m->m_len -= hlen;
840 
841 	/*
842 	 * If first fragment to arrive, create a reassembly queue.
843 	 */
844 	if (fp == NULL) {
845 		fp = pool_get(&ipq_pool, PR_NOWAIT);
846 		if (fp == NULL)
847 			goto dropfrag;
848 		LIST_INSERT_HEAD(&ipq, fp, ipq_q);
849 		fp->ipq_ttl = IPFRAGTTL;
850 		fp->ipq_p = ipqe->ipqe_ip->ip_p;
851 		fp->ipq_id = ipqe->ipqe_ip->ip_id;
852 		LIST_INIT(&fp->ipq_fragq);
853 		fp->ipq_src = ipqe->ipqe_ip->ip_src;
854 		fp->ipq_dst = ipqe->ipqe_ip->ip_dst;
855 		p = NULL;
856 		goto insert;
857 	}
858 
859 	/*
860 	 * Handle ECN by comparing this segment with the first one;
861 	 * if CE is set, do not lose CE.
862 	 * drop if CE and not-ECT are mixed for the same packet.
863 	 */
864 	ecn = ipqe->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
865 	ecn0 = LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos & IPTOS_ECN_MASK;
866 	if (ecn == IPTOS_ECN_CE) {
867 		if (ecn0 == IPTOS_ECN_NOTECT)
868 			goto dropfrag;
869 		if (ecn0 != IPTOS_ECN_CE)
870 			LIST_FIRST(&fp->ipq_fragq)->ipqe_ip->ip_tos |=
871 			    IPTOS_ECN_CE;
872 	}
873 	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
874 		goto dropfrag;
875 
876 	/*
877 	 * Find a segment which begins after this one does.
878 	 */
879 	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
880 	    p = q, q = LIST_NEXT(q, ipqe_q))
881 		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
882 			break;
883 
884 	/*
885 	 * If there is a preceding segment, it may provide some of
886 	 * our data already.  If so, drop the data from the incoming
887 	 * segment.  If it provides all of our data, drop us.
888 	 */
889 	if (p != NULL) {
890 		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
891 		    ntohs(ipqe->ipqe_ip->ip_off);
892 		if (i > 0) {
893 			if (i >= ntohs(ipqe->ipqe_ip->ip_len))
894 				goto dropfrag;
895 			m_adj(ipqe->ipqe_m, i);
896 			ipqe->ipqe_ip->ip_off =
897 			    htons(ntohs(ipqe->ipqe_ip->ip_off) + i);
898 			ipqe->ipqe_ip->ip_len =
899 			    htons(ntohs(ipqe->ipqe_ip->ip_len) - i);
900 		}
901 	}
902 
903 	/*
904 	 * While we overlap succeeding segments trim them or,
905 	 * if they are completely covered, dequeue them.
906 	 */
907 	for (; q != NULL &&
908 	    ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) >
909 	    ntohs(q->ipqe_ip->ip_off); q = nq) {
910 		i = (ntohs(ipqe->ipqe_ip->ip_off) +
911 		    ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
912 		if (i < ntohs(q->ipqe_ip->ip_len)) {
913 			q->ipqe_ip->ip_len =
914 			    htons(ntohs(q->ipqe_ip->ip_len) - i);
915 			q->ipqe_ip->ip_off =
916 			    htons(ntohs(q->ipqe_ip->ip_off) + i);
917 			m_adj(q->ipqe_m, i);
918 			break;
919 		}
920 		nq = LIST_NEXT(q, ipqe_q);
921 		m_freem(q->ipqe_m);
922 		LIST_REMOVE(q, ipqe_q);
923 		pool_put(&ipqent_pool, q);
924 		ip_frags--;
925 	}
926 
927 insert:
928 	/*
929 	 * Stick new segment in its place;
930 	 * check for complete reassembly.
931 	 */
932 	if (p == NULL) {
933 		LIST_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
934 	} else {
935 		LIST_INSERT_AFTER(p, ipqe, ipqe_q);
936 	}
937 	next = 0;
938 	for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq); q != NULL;
939 	    p = q, q = LIST_NEXT(q, ipqe_q)) {
940 		if (ntohs(q->ipqe_ip->ip_off) != next)
941 			return (0);
942 		next += ntohs(q->ipqe_ip->ip_len);
943 	}
944 	if (p->ipqe_mff)
945 		return (0);
946 
947 	/*
948 	 * Reassembly is complete.  Check for a bogus message size and
949 	 * concatenate fragments.
950 	 */
951 	q = LIST_FIRST(&fp->ipq_fragq);
952 	ip = q->ipqe_ip;
953 	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
954 		ipstat_inc(ips_toolong);
955 		ip_freef(fp);
956 		return (0);
957 	}
958 	m = q->ipqe_m;
959 	t = m->m_next;
960 	m->m_next = 0;
961 	m_cat(m, t);
962 	nq = LIST_NEXT(q, ipqe_q);
963 	pool_put(&ipqent_pool, q);
964 	ip_frags--;
965 	for (q = nq; q != NULL; q = nq) {
966 		t = q->ipqe_m;
967 		nq = LIST_NEXT(q, ipqe_q);
968 		pool_put(&ipqent_pool, q);
969 		ip_frags--;
970 		m_cat(m, t);
971 	}
972 
973 	/*
974 	 * Create header for new ip packet by
975 	 * modifying header of first packet;
976 	 * dequeue and discard fragment reassembly header.
977 	 * Make header visible.
978 	 */
979 	ip->ip_len = htons(next);
980 	ip->ip_src = fp->ipq_src;
981 	ip->ip_dst = fp->ipq_dst;
982 	LIST_REMOVE(fp, ipq_q);
983 	pool_put(&ipq_pool, fp);
984 	m->m_len += (ip->ip_hl << 2);
985 	m->m_data -= (ip->ip_hl << 2);
986 	/* some debugging cruft by sklower, below, will go away soon */
987 	if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
988 		int plen = 0;
989 		for (t = m; t; t = t->m_next)
990 			plen += t->m_len;
991 		m->m_pkthdr.len = plen;
992 	}
993 	return (m);
994 
995 dropfrag:
996 	ipstat_inc(ips_fragdropped);
997 	m_freem(m);
998 	pool_put(&ipqent_pool, ipqe);
999 	ip_frags--;
1000 	return (NULL);
1001 }
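
/*
 * A worked example of the overlap trimming in ip_reass(), with offsets
 * and lengths already converted to bytes and headers stripped by
 * ip_local(): if the preceding fragment p covers offset 0 with 24 bytes
 * of data and the new fragment starts at offset 16 with 24 bytes, then
 * i = 0 + 24 - 16 = 8, so m_adj() drops 8 bytes from the front of the
 * new fragment and its offset/length become 24 and 16.  Had i been
 * >= 24, the new fragment would have been a complete duplicate and
 * dropped.
 */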
1002 
1003 /*
1004  * Free a fragment reassembly header and all
1005  * associated datagrams.
1006  */
1007 void
1008 ip_freef(struct ipq *fp)
1009 {
1010 	struct ipqent *q;
1011 
1012 	MUTEX_ASSERT_LOCKED(&ipq_mutex);
1013 
1014 	while ((q = LIST_FIRST(&fp->ipq_fragq)) != NULL) {
1015 		LIST_REMOVE(q, ipqe_q);
1016 		m_freem(q->ipqe_m);
1017 		pool_put(&ipqent_pool, q);
1018 		ip_frags--;
1019 	}
1020 	LIST_REMOVE(fp, ipq_q);
1021 	pool_put(&ipq_pool, fp);
1022 }
1023 
1024 /*
1025  * IP timer processing;
1026  * if a timer expires on a reassembly queue, discard it.
1027  */
1028 void
1029 ip_slowtimo(void)
1030 {
1031 	struct ipq *fp, *nfp;
1032 
1033 	mtx_enter(&ipq_mutex);
1034 	LIST_FOREACH_SAFE(fp, &ipq, ipq_q, nfp) {
1035 		if (--fp->ipq_ttl == 0) {
1036 			ipstat_inc(ips_fragtimeout);
1037 			ip_freef(fp);
1038 		}
1039 	}
1040 	mtx_leave(&ipq_mutex);
1041 }
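
/*
 * Rough lifetime arithmetic, assuming the protocol slow timeout still
 * fires PR_SLOWHZ (2) times per second and IPFRAGTTL keeps its
 * traditional value of 60: a partially reassembled datagram that never
 * completes is discarded after roughly 60 / 2 = 30 seconds.
 */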
1042 
1043 /*
1044  * Drain off all datagram fragments.
1045  */
1046 void
1047 ip_drain(void)
1048 {
1049 	mtx_enter(&ipq_mutex);
1050 	while (!LIST_EMPTY(&ipq)) {
1051 		ipstat_inc(ips_fragdropped);
1052 		ip_freef(LIST_FIRST(&ipq));
1053 	}
1054 	mtx_leave(&ipq_mutex);
1055 }
1056 
1057 /*
1058  * Flush a bunch of datagram fragments until we are down to 75% of the limit.
1059  */
1060 void
1061 ip_flush(void)
1062 {
1063 	int max = 50;
1064 
1065 	MUTEX_ASSERT_LOCKED(&ipq_mutex);
1066 
1067 	while (!LIST_EMPTY(&ipq) && ip_frags > ip_maxqueue * 3 / 4 && --max) {
1068 		ipstat_inc(ips_fragdropped);
1069 		ip_freef(LIST_FIRST(&ipq));
1070 	}
1071 }
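
/*
 * With the defaults above (ip_maxqueue = 300) this drops whole
 * reassembly queues until ip_frags falls to 300 * 3 / 4 = 225, or until
 * the small per-call cap of about 50 freed queues is reached, whichever
 * comes first.
 */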
1072 
1073 /*
1074  * Do option processing on a datagram,
1075  * possibly discarding it if bad options are encountered,
1076  * or forwarding it if source-routed.
1077  * Returns 1 if packet has been forwarded/freed,
1078  * 0 if the packet should be processed further.
1079  */
1080 int
1081 ip_dooptions(struct mbuf *m, struct ifnet *ifp)
1082 {
1083 	struct ip *ip = mtod(m, struct ip *);
1084 	unsigned int rtableid = m->m_pkthdr.ph_rtableid;
1085 	struct rtentry *rt;
1086 	struct sockaddr_in ipaddr;
1087 	u_char *cp;
1088 	struct ip_timestamp ipt;
1089 	struct in_ifaddr *ia;
1090 	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
1091 	struct in_addr sin, dst;
1092 	u_int32_t ntime;
1093 
1094 	dst = ip->ip_dst;
1095 	cp = (u_char *)(ip + 1);
1096 	cnt = (ip->ip_hl << 2) - sizeof (struct ip);
1097 
1098 	KERNEL_LOCK();
1099 	for (; cnt > 0; cnt -= optlen, cp += optlen) {
1100 		opt = cp[IPOPT_OPTVAL];
1101 		if (opt == IPOPT_EOL)
1102 			break;
1103 		if (opt == IPOPT_NOP)
1104 			optlen = 1;
1105 		else {
1106 			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
1107 				code = &cp[IPOPT_OLEN] - (u_char *)ip;
1108 				goto bad;
1109 			}
1110 			optlen = cp[IPOPT_OLEN];
1111 			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
1112 				code = &cp[IPOPT_OLEN] - (u_char *)ip;
1113 				goto bad;
1114 			}
1115 		}
1116 
1117 		switch (opt) {
1118 
1119 		default:
1120 			break;
1121 
1122 		/*
1123 		 * Source routing with record.
1124 		 * Find interface with current destination address.
1125 		 * If none on this machine then drop if strictly routed,
1126 		 * or do nothing if loosely routed.
1127 		 * Record interface address and bring up next address
1128 		 * component.  If strictly routed make sure next
1129 		 * address is on directly accessible net.
1130 		 */
1131 		case IPOPT_LSRR:
1132 		case IPOPT_SSRR:
1133 			if (!ip_dosourceroute) {
1134 				type = ICMP_UNREACH;
1135 				code = ICMP_UNREACH_SRCFAIL;
1136 				goto bad;
1137 			}
1138 			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
1139 				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1140 				goto bad;
1141 			}
1142 			memset(&ipaddr, 0, sizeof(ipaddr));
1143 			ipaddr.sin_family = AF_INET;
1144 			ipaddr.sin_len = sizeof(ipaddr);
1145 			ipaddr.sin_addr = ip->ip_dst;
1146 			ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr),
1147 			    m->m_pkthdr.ph_rtableid));
1148 			if (ia == NULL) {
1149 				if (opt == IPOPT_SSRR) {
1150 					type = ICMP_UNREACH;
1151 					code = ICMP_UNREACH_SRCFAIL;
1152 					goto bad;
1153 				}
1154 				/*
1155 				 * Loose routing, and not at next destination
1156 				 * yet; nothing to do except forward.
1157 				 */
1158 				break;
1159 			}
1160 			off--;			/* 0 origin */
1161 			if ((off + sizeof(struct in_addr)) > optlen) {
1162 				/*
1163 				 * End of source route.  Should be for us.
1164 				 */
1165 				save_rte(m, cp, ip->ip_src);
1166 				break;
1167 			}
1168 
1169 			/*
1170 			 * locate outgoing interface
1171 			 */
1172 			memset(&ipaddr, 0, sizeof(ipaddr));
1173 			ipaddr.sin_family = AF_INET;
1174 			ipaddr.sin_len = sizeof(ipaddr);
1175 			memcpy(&ipaddr.sin_addr, cp + off,
1176 			    sizeof(ipaddr.sin_addr));
1177 			/* keep packet in the virtual instance */
1178 			rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
1179 			if (!rtisvalid(rt) || ((opt == IPOPT_SSRR) &&
1180 			    ISSET(rt->rt_flags, RTF_GATEWAY))) {
1181 				type = ICMP_UNREACH;
1182 				code = ICMP_UNREACH_SRCFAIL;
1183 				rtfree(rt);
1184 				goto bad;
1185 			}
1186 			ia = ifatoia(rt->rt_ifa);
1187 			memcpy(cp + off, &ia->ia_addr.sin_addr,
1188 			    sizeof(struct in_addr));
1189 			rtfree(rt);
1190 			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1191 			ip->ip_dst = ipaddr.sin_addr;
1192 			/*
1193 			 * Let ip_intr's mcast routing check handle mcast pkts
1194 			 */
1195 			forward = !IN_MULTICAST(ip->ip_dst.s_addr);
1196 			break;
1197 
1198 		case IPOPT_RR:
1199 			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
1200 				code = &cp[IPOPT_OLEN] - (u_char *)ip;
1201 				goto bad;
1202 			}
1203 			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
1204 				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1205 				goto bad;
1206 			}
1207 
1208 			/*
1209 			 * If no space remains, ignore.
1210 			 */
1211 			off--;			/* 0 origin */
1212 			if ((off + sizeof(struct in_addr)) > optlen)
1213 				break;
1214 			memset(&ipaddr, 0, sizeof(ipaddr));
1215 			ipaddr.sin_family = AF_INET;
1216 			ipaddr.sin_len = sizeof(ipaddr);
1217 			ipaddr.sin_addr = ip->ip_dst;
1218 			/*
1219 			 * locate outgoing interface; if we're the destination,
1220 			 * use the incoming interface (should be same).
1221 			 * Again keep the packet inside the virtual instance.
1222 			 */
1223 			rt = rtalloc(sintosa(&ipaddr), RT_RESOLVE, rtableid);
1224 			if (!rtisvalid(rt)) {
1225 				type = ICMP_UNREACH;
1226 				code = ICMP_UNREACH_HOST;
1227 				rtfree(rt);
1228 				goto bad;
1229 			}
1230 			ia = ifatoia(rt->rt_ifa);
1231 			memcpy(cp + off, &ia->ia_addr.sin_addr,
1232 			    sizeof(struct in_addr));
1233 			rtfree(rt);
1234 			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1235 			break;
1236 
1237 		case IPOPT_TS:
1238 			code = cp - (u_char *)ip;
1239 			if (optlen < sizeof(struct ip_timestamp))
1240 				goto bad;
1241 			memcpy(&ipt, cp, sizeof(struct ip_timestamp));
1242 			if (ipt.ipt_ptr < 5 || ipt.ipt_len < 5)
1243 				goto bad;
1244 			if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) > ipt.ipt_len) {
1245 				if (++ipt.ipt_oflw == 0)
1246 					goto bad;
1247 				break;
1248 			}
1249 			memcpy(&sin, cp + ipt.ipt_ptr - 1, sizeof sin);
1250 			switch (ipt.ipt_flg) {
1251 
1252 			case IPOPT_TS_TSONLY:
1253 				break;
1254 
1255 			case IPOPT_TS_TSANDADDR:
1256 				if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
1257 				    sizeof(struct in_addr) > ipt.ipt_len)
1258 					goto bad;
1259 				memset(&ipaddr, 0, sizeof(ipaddr));
1260 				ipaddr.sin_family = AF_INET;
1261 				ipaddr.sin_len = sizeof(ipaddr);
1262 				ipaddr.sin_addr = dst;
1263 				ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr),
1264 				    ifp));
1265 				if (ia == NULL)
1266 					continue;
1267 				memcpy(&sin, &ia->ia_addr.sin_addr,
1268 				    sizeof(struct in_addr));
1269 				ipt.ipt_ptr += sizeof(struct in_addr);
1270 				break;
1271 
1272 			case IPOPT_TS_PRESPEC:
1273 				if (ipt.ipt_ptr - 1 + sizeof(u_int32_t) +
1274 				    sizeof(struct in_addr) > ipt.ipt_len)
1275 					goto bad;
1276 				memset(&ipaddr, 0, sizeof(ipaddr));
1277 				ipaddr.sin_family = AF_INET;
1278 				ipaddr.sin_len = sizeof(ipaddr);
1279 				ipaddr.sin_addr = sin;
1280 				if (ifa_ifwithaddr(sintosa(&ipaddr),
1281 				    m->m_pkthdr.ph_rtableid) == NULL)
1282 					continue;
1283 				ipt.ipt_ptr += sizeof(struct in_addr);
1284 				break;
1285 
1286 			default:
1287 				/* XXX can't take &ipt->ipt_flg */
1288 				code = (u_char *)&ipt.ipt_ptr -
1289 				    (u_char *)ip + 1;
1290 				goto bad;
1291 			}
1292 			ntime = iptime();
1293 			memcpy(cp + ipt.ipt_ptr - 1, &ntime, sizeof(u_int32_t));
1294 			ipt.ipt_ptr += sizeof(u_int32_t);
1295 		}
1296 	}
1297 	KERNEL_UNLOCK();
1298 	if (forward && ipforwarding) {
1299 		ip_forward(m, ifp, NULL, 1);
1300 		return (1);
1301 	}
1302 	return (0);
1303 bad:
1304 	KERNEL_UNLOCK();
1305 	icmp_error(m, type, code, 0, 0);
1306 	ipstat_inc(ips_badoptions);
1307 	return (1);
1308 }
1309 
1310 /*
1311  * Save incoming source route for use in replies,
1312  * to be picked up later by ip_srcroute if the receiver is interested.
1313  */
1314 void
1315 save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
1316 {
1317 	struct ip_srcrt *isr;
1318 	struct m_tag *mtag;
1319 	unsigned olen;
1320 
1321 	olen = option[IPOPT_OLEN];
1322 	if (olen > sizeof(isr->isr_hdr) + sizeof(isr->isr_routes))
1323 		return;
1324 
1325 	mtag = m_tag_get(PACKET_TAG_SRCROUTE, sizeof(*isr), M_NOWAIT);
1326 	if (mtag == NULL)
1327 		return;
1328 	isr = (struct ip_srcrt *)(mtag + 1);
1329 
1330 	memcpy(isr->isr_hdr, option, olen);
1331 	isr->isr_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
1332 	isr->isr_dst = dst;
1333 	m_tag_prepend(m, mtag);
1334 }
1335 
1336 /*
1337  * Retrieve incoming source route for use in replies,
1338  * in the same form used by setsockopt.
1339  * The first hop is placed before the options and will be removed later.
1340  */
1341 struct mbuf *
1342 ip_srcroute(struct mbuf *m0)
1343 {
1344 	struct in_addr *p, *q;
1345 	struct mbuf *m;
1346 	struct ip_srcrt *isr;
1347 	struct m_tag *mtag;
1348 
1349 	if (!ip_dosourceroute)
1350 		return (NULL);
1351 
1352 	mtag = m_tag_find(m0, PACKET_TAG_SRCROUTE, NULL);
1353 	if (mtag == NULL)
1354 		return (NULL);
1355 	isr = (struct ip_srcrt *)(mtag + 1);
1356 
1357 	if (isr->isr_nhops == 0)
1358 		return (NULL);
1359 	m = m_get(M_DONTWAIT, MT_SOOPTS);
1360 	if (m == NULL)
1361 		return (NULL);
1362 
1363 #define OPTSIZ	(sizeof(isr->isr_nop) + sizeof(isr->isr_hdr))
1364 
1365 	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + header) */
1366 	m->m_len = (isr->isr_nhops + 1) * sizeof(struct in_addr) + OPTSIZ;
1367 
1368 	/*
1369 	 * First save first hop for return route
1370 	 */
1371 	p = &(isr->isr_routes[isr->isr_nhops - 1]);
1372 	*(mtod(m, struct in_addr *)) = *p--;
1373 
1374 	/*
1375 	 * Copy option fields and padding (nop) to mbuf.
1376 	 */
1377 	isr->isr_nop = IPOPT_NOP;
1378 	isr->isr_hdr[IPOPT_OFFSET] = IPOPT_MINOFF;
1379 	memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &isr->isr_nop,
1380 	    OPTSIZ);
1381 	q = (struct in_addr *)(mtod(m, caddr_t) +
1382 	    sizeof(struct in_addr) + OPTSIZ);
1383 #undef OPTSIZ
1384 	/*
1385 	 * Record return path as an IP source route,
1386 	 * reversing the path (pointers are now aligned).
1387 	 */
1388 	while (p >= isr->isr_routes) {
1389 		*q++ = *p--;
1390 	}
1391 	/*
1392 	 * Last hop goes to final destination.
1393 	 */
1394 	*q = isr->isr_dst;
1395 	m_tag_delete(m0, mtag);
1396 	return (m);
1397 }
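
/*
 * A sketch of the option buffer built above, assuming two recorded hops
 * A and B (isr_nhops == 2) and a source-route destination D saved by
 * save_rte():
 *
 *	[ B ][ NOP ][ saved 3-byte hdr, offset = IPOPT_MINOFF ][ A ][ D ]
 *
 * i.e. 16 bytes: the first hop of the return route, then the option
 * header, then the remaining hops reversed, ending at the final
 * destination.
 */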
1398 
1399 /*
1400  * Strip out IP options, for use by higher-level protocols in the kernel.
1401  */
1402 void
1403 ip_stripoptions(struct mbuf *m)
1404 {
1405 	int i;
1406 	struct ip *ip = mtod(m, struct ip *);
1407 	caddr_t opts;
1408 	int olen;
1409 
1410 	olen = (ip->ip_hl<<2) - sizeof (struct ip);
1411 	opts = (caddr_t)(ip + 1);
1412 	i = m->m_len - (sizeof (struct ip) + olen);
1413 	memmove(opts, opts + olen, i);
1414 	m->m_len -= olen;
1415 	if (m->m_flags & M_PKTHDR)
1416 		m->m_pkthdr.len -= olen;
1417 	ip->ip_hl = sizeof(struct ip) >> 2;
1418 	ip->ip_len = htons(ntohs(ip->ip_len) - olen);
1419 }
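
/*
 * A worked example of the arithmetic above: for a header with ip_hl = 7
 * (28 bytes), olen = 28 - 20 = 8, so the 8 option bytes are overwritten
 * by the payload that follows them in the first mbuf, m_len/ip_len
 * shrink by 8 and ip_hl is reset to 5.
 */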
1420 
1421 int inetctlerrmap[PRC_NCMDS] = {
1422 	0,		0,		0,		0,
1423 	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
1424 	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
1425 	EMSGSIZE,	EHOSTUNREACH,	0,		0,
1426 	0,		0,		0,		0,
1427 	ENOPROTOOPT
1428 };
1429 
1430 /*
1431  * Forward a packet.  If some error occurs return the sender
1432  * an icmp packet.  Note we can't always generate a meaningful
1433  * icmp message because icmp doesn't have a large enough repertoire
1434  * of codes and types.
1435  *
1436  * If not forwarding, just drop the packet.  This could be confusing
1437  * if ipforwarding was zero but some routing protocol was advancing
1438  * us as a gateway to somewhere.  However, we must let the routing
1439  * protocol deal with that.
1440  *
1441  * The srcrt parameter indicates whether the packet is being forwarded
1442  * via a source route.
1443  */
1444 void
1445 ip_forward(struct mbuf *m, struct ifnet *ifp, struct rtentry *rt, int srcrt)
1446 {
1447 	struct mbuf mfake, *mcopy = NULL;
1448 	struct ip *ip = mtod(m, struct ip *);
1449 	struct sockaddr_in *sin;
1450 	struct route ro;
1451 	int error, type = 0, code = 0, destmtu = 0, fake = 0, len;
1452 	u_int32_t dest;
1453 
1454 	dest = 0;
1455 	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
1456 		ipstat_inc(ips_cantforward);
1457 		m_freem(m);
1458 		goto freecopy;
1459 	}
1460 	if (ip->ip_ttl <= IPTTLDEC) {
1461 		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
1462 		goto freecopy;
1463 	}
1464 
1465 	sin = satosin(&ro.ro_dst);
1466 	memset(sin, 0, sizeof(*sin));
1467 	sin->sin_family = AF_INET;
1468 	sin->sin_len = sizeof(*sin);
1469 	sin->sin_addr = ip->ip_dst;
1470 
1471 	if (!rtisvalid(rt)) {
1472 		rtfree(rt);
1473 		rt = rtalloc_mpath(sintosa(sin), &ip->ip_src.s_addr,
1474 		    m->m_pkthdr.ph_rtableid);
1475 		if (rt == NULL) {
1476 			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
1477 			return;
1478 		}
1479 	}
1480 
1481 	/*
1482 	 * Save at most 68 bytes of the packet in case
1483 	 * we need to generate an ICMP message to the src.
1484 	 * The data is saved in an mbuf on the stack that
1485 	 * acts as temporary storage and is not intended to be
1486 	 * passed down the IP stack or handed to m_freem().
1487 	 */
1488 	memset(&mfake.m_hdr, 0, sizeof(mfake.m_hdr));
1489 	mfake.m_type = m->m_type;
1490 	if (m_dup_pkthdr(&mfake, m, M_DONTWAIT) == 0) {
1491 		mfake.m_data = mfake.m_pktdat;
1492 		len = min(ntohs(ip->ip_len), 68);
1493 		m_copydata(m, 0, len, mfake.m_pktdat);
1494 		mfake.m_pkthdr.len = mfake.m_len = len;
1495 #if NPF > 0
1496 		pf_pkt_unlink_state_key(&mfake);
1497 #endif	/* NPF > 0 */
1498 		fake = 1;
1499 	}
1500 
1501 	ip->ip_ttl -= IPTTLDEC;
1502 
1503 	/*
1504 	 * If forwarding packet using same interface that it came in on,
1505 	 * perhaps should send a redirect to sender to shortcut a hop.
1506 	 * Only send redirect if source is sending directly to us,
1507 	 * and if packet was not source routed (or has any options).
1508 	 * Also, don't send redirect if forwarding using a default route
1509 	 * or a route modified by a redirect.
1510 	 * Don't send redirect if we advertise destination's arp address
1511 	 * as ours (proxy arp).
1512 	 */
1513 	if ((rt->rt_ifidx == ifp->if_index) &&
1514 	    (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
1515 	    satosin(rt_key(rt))->sin_addr.s_addr != 0 &&
1516 	    ipsendredirects && !srcrt &&
1517 	    !arpproxy(satosin(rt_key(rt))->sin_addr, m->m_pkthdr.ph_rtableid)) {
1518 		if ((ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_netmask) ==
1519 		    ifatoia(rt->rt_ifa)->ia_net) {
1520 		    if (rt->rt_flags & RTF_GATEWAY)
1521 			dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
1522 		    else
1523 			dest = ip->ip_dst.s_addr;
1524 		    /* Router Requirements says to send only host redirects */
1525 		    type = ICMP_REDIRECT;
1526 		    code = ICMP_REDIRECT_HOST;
1527 		}
1528 	}
1529 
1530 	ro.ro_rt = rt;
1531 	ro.ro_tableid = m->m_pkthdr.ph_rtableid;
1532 	error = ip_output(m, NULL, &ro,
1533 	    (IP_FORWARDING | (ip_directedbcast ? IP_ALLOWBROADCAST : 0)),
1534 	    NULL, NULL, 0);
1535 	rt = ro.ro_rt;
1536 	if (error)
1537 		ipstat_inc(ips_cantforward);
1538 	else {
1539 		ipstat_inc(ips_forward);
1540 		if (type)
1541 			ipstat_inc(ips_redirectsent);
1542 		else
1543 			goto freecopy;
1544 	}
1545 	if (!fake)
1546 		goto freecopy;
1547 
1548 	switch (error) {
1549 
1550 	case 0:				/* forwarded, but need redirect */
1551 		/* type, code set above */
1552 		break;
1553 
1554 	case ENETUNREACH:		/* shouldn't happen, checked above */
1555 	case EHOSTUNREACH:
1556 	case ENETDOWN:
1557 	case EHOSTDOWN:
1558 	default:
1559 		type = ICMP_UNREACH;
1560 		code = ICMP_UNREACH_HOST;
1561 		break;
1562 
1563 	case EMSGSIZE:
1564 		type = ICMP_UNREACH;
1565 		code = ICMP_UNREACH_NEEDFRAG;
1566 
1567 #ifdef IPSEC
1568 		if (rt != NULL) {
1569 			if (rt->rt_mtu)
1570 				destmtu = rt->rt_mtu;
1571 			else {
1572 				struct ifnet *destifp;
1573 
1574 				destifp = if_get(rt->rt_ifidx);
1575 				if (destifp != NULL)
1576 					destmtu = destifp->if_mtu;
1577 				if_put(destifp);
1578 			}
1579 		}
1580 #endif /*IPSEC*/
1581 		ipstat_inc(ips_cantfrag);
1582 		break;
1583 
1584 	case EACCES:
1585 		/*
1586 		 * pf(4) blocked the packet. There is no need to send an ICMP
1587 		 * packet back since pf(4) takes care of it.
1588 		 */
1589 		goto freecopy;
1590 	case ENOBUFS:
1591 		/*
1592 		 * A router should not generate ICMP_SOURCEQUENCH, as
1593 		 * required by RFC 1812, Requirements for IP Version 4 Routers.
1594 		 * Source quench could be a big problem under DoS attacks,
1595 		 * or when the underlying interface is rate-limited.
1596 		 */
1597 		goto freecopy;
1598 	}
1599 
1600 	mcopy = m_copym(&mfake, 0, len, M_DONTWAIT);
1601 	if (mcopy)
1602 		icmp_error(mcopy, type, code, dest, destmtu);
1603 
1604 freecopy:
1605 	if (fake)
1606 		m_tag_delete_chain(&mfake);
1607 	rtfree(rt);
1608 }
1609 
1610 int
1611 ip_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1612     size_t newlen)
1613 {
1614 	int error;
1615 #ifdef MROUTING
1616 	extern int ip_mrtproto;
1617 	extern struct mrtstat mrtstat;
1618 #endif
1619 
1620 	NET_ASSERT_LOCKED();
1621 
1622 	/* Almost all sysctl names at this level are terminal. */
1623 	if (namelen != 1 && name[0] != IPCTL_IFQUEUE)
1624 		return (ENOTDIR);
1625 
1626 	switch (name[0]) {
1627 #ifdef notyet
1628 	case IPCTL_DEFMTU:
1629 		return (sysctl_int(oldp, oldlenp, newp, newlen, &ip_mtu));
1630 #endif
1631 	case IPCTL_SOURCEROUTE:
1632 		/*
1633 		 * Don't allow this to change in a secure environment.
1634 		 */
1635 		if (newp && securelevel > 0)
1636 			return (EPERM);
1637 		return (sysctl_int(oldp, oldlenp, newp, newlen,
1638 		    &ip_dosourceroute));
1639 	case IPCTL_MTUDISC:
1640 		error = sysctl_int(oldp, oldlenp, newp, newlen,
1641 		    &ip_mtudisc);
1642 		if (ip_mtudisc != 0 && ip_mtudisc_timeout_q == NULL) {
1643 			ip_mtudisc_timeout_q =
1644 			    rt_timer_queue_create(ip_mtudisc_timeout);
1645 		} else if (ip_mtudisc == 0 && ip_mtudisc_timeout_q != NULL) {
1646 			rt_timer_queue_destroy(ip_mtudisc_timeout_q);
1647 			ip_mtudisc_timeout_q = NULL;
1648 		}
1649 		return error;
1650 	case IPCTL_MTUDISCTIMEOUT:
1651 		error = sysctl_int(oldp, oldlenp, newp, newlen,
1652 		   &ip_mtudisc_timeout);
1653 		if (ip_mtudisc_timeout_q != NULL)
1654 			rt_timer_queue_change(ip_mtudisc_timeout_q,
1655 					      ip_mtudisc_timeout);
1656 		return (error);
1657 	case IPCTL_IPSEC_ENC_ALGORITHM:
1658 		return (sysctl_tstring(oldp, oldlenp, newp, newlen,
1659 				       ipsec_def_enc, sizeof(ipsec_def_enc)));
1660 	case IPCTL_IPSEC_AUTH_ALGORITHM:
1661 		return (sysctl_tstring(oldp, oldlenp, newp, newlen,
1662 				       ipsec_def_auth,
1663 				       sizeof(ipsec_def_auth)));
1664 	case IPCTL_IPSEC_IPCOMP_ALGORITHM:
1665 		return (sysctl_tstring(oldp, oldlenp, newp, newlen,
1666 				       ipsec_def_comp,
1667 				       sizeof(ipsec_def_comp)));
1668 	case IPCTL_IFQUEUE:
1669 		return (sysctl_niq(name + 1, namelen - 1,
1670 		    oldp, oldlenp, newp, newlen, &ipintrq));
1671 	case IPCTL_STATS:
1672 		return (ip_sysctl_ipstat(oldp, oldlenp, newp));
1673 #ifdef MROUTING
1674 	case IPCTL_MRTSTATS:
1675 		return (sysctl_rdstruct(oldp, oldlenp, newp,
1676 		    &mrtstat, sizeof(mrtstat)));
1677 	case IPCTL_MRTPROTO:
1678 		return (sysctl_rdint(oldp, oldlenp, newp, ip_mrtproto));
1679 	case IPCTL_MRTMFC:
1680 		if (newp)
1681 			return (EPERM);
1682 		return mrt_sysctl_mfc(oldp, oldlenp);
1683 	case IPCTL_MRTVIF:
1684 		if (newp)
1685 			return (EPERM);
1686 		return mrt_sysctl_vif(oldp, oldlenp);
1687 #else
1688 	case IPCTL_MRTPROTO:
1689 	case IPCTL_MRTSTATS:
1690 	case IPCTL_MRTMFC:
1691 	case IPCTL_MRTVIF:
1692 		return (EOPNOTSUPP);
1693 #endif
1694 	default:
1695 		if (name[0] < IPCTL_MAXID)
1696 			return (sysctl_int_arr(ipctl_vars, name, namelen,
1697 			    oldp, oldlenp, newp, newlen));
1698 		return (EOPNOTSUPP);
1699 	}
1700 	/* NOTREACHED */
1701 }
1702 
1703 int
1704 ip_sysctl_ipstat(void *oldp, size_t *oldlenp, void *newp)
1705 {
1706 	uint64_t counters[ips_ncounters];
1707 	struct ipstat ipstat;
1708 	u_long *words = (u_long *)&ipstat;
1709 	int i;
1710 
1711 	CTASSERT(sizeof(ipstat) == (nitems(counters) * sizeof(u_long)));
1712 	memset(&ipstat, 0, sizeof ipstat);
1713 	counters_read(ipcounters, counters, nitems(counters));
1714 
1715 	for (i = 0; i < nitems(counters); i++)
1716 		words[i] = (u_long)counters[i];
1717 
1718 	return (sysctl_rdstruct(oldp, oldlenp, newp, &ipstat, sizeof(ipstat)));
1719 }
1720 
1721 void
1722 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
1723     struct mbuf *m)
1724 {
1725 	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
1726 		struct timeval tv;
1727 
1728 		microtime(&tv);
1729 		*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
1730 		    SCM_TIMESTAMP, SOL_SOCKET);
1731 		if (*mp)
1732 			mp = &(*mp)->m_next;
1733 	}
1734 
1735 	if (inp->inp_flags & INP_RECVDSTADDR) {
1736 		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
1737 		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
1738 		if (*mp)
1739 			mp = &(*mp)->m_next;
1740 	}
1741 #ifdef notyet
1742 	/* this code is broken and will probably never be fixed. */
1743 	/* options were tossed already */
1744 	if (inp->inp_flags & INP_RECVOPTS) {
1745 		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
1746 		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
1747 		if (*mp)
1748 			mp = &(*mp)->m_next;
1749 	}
1750 	/* ip_srcroute doesn't do what we want here, need to fix */
1751 	if (inp->inp_flags & INP_RECVRETOPTS) {
1752 		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
1753 		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
1754 		if (*mp)
1755 			mp = &(*mp)->m_next;
1756 	}
1757 #endif
1758 	if (inp->inp_flags & INP_RECVIF) {
1759 		struct sockaddr_dl sdl;
1760 		struct ifnet *ifp;
1761 
1762 		ifp = if_get(m->m_pkthdr.ph_ifidx);
1763 		if (ifp == NULL || ifp->if_sadl == NULL) {
1764 			memset(&sdl, 0, sizeof(sdl));
1765 			sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]);
1766 			sdl.sdl_family = AF_LINK;
1767 			sdl.sdl_index = ifp != NULL ? ifp->if_index : 0;
1768 			sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0;
1769 			*mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len,
1770 			    IP_RECVIF, IPPROTO_IP);
1771 		} else {
1772 			*mp = sbcreatecontrol((caddr_t) ifp->if_sadl,
1773 			    ifp->if_sadl->sdl_len, IP_RECVIF, IPPROTO_IP);
1774 		}
1775 		if (*mp)
1776 			mp = &(*mp)->m_next;
1777 		if_put(ifp);
1778 	}
1779 	if (inp->inp_flags & INP_RECVTTL) {
1780 		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
1781 		    sizeof(u_int8_t), IP_RECVTTL, IPPROTO_IP);
1782 		if (*mp)
1783 			mp = &(*mp)->m_next;
1784 	}
1785 	if (inp->inp_flags & INP_RECVRTABLE) {
1786 		u_int rtableid = inp->inp_rtableid;
1787 #if NPF > 0
1788 		struct pf_divert *divert;
1789 
1790 		if (m && m->m_pkthdr.pf.flags & PF_TAG_DIVERTED &&
1791 		    (divert = pf_find_divert(m)) != NULL)
1792 			rtableid = divert->rdomain;
1793 #endif
1794 
1795 		*mp = sbcreatecontrol((caddr_t) &rtableid,
1796 		    sizeof(u_int), IP_RECVRTABLE, IPPROTO_IP);
1797 		if (*mp)
1798 			mp = &(*mp)->m_next;
1799 	}
1800 }
1801 
1802 void
1803 ip_send_dispatch(void *xmq)
1804 {
1805 	struct mbuf_queue *mq = xmq;
1806 	struct mbuf *m;
1807 	struct mbuf_list ml;
1808 #ifdef IPSEC
1809 	int locked = 0;
1810 #endif /* IPSEC */
1811 
1812 	mq_delist(mq, &ml);
1813 	if (ml_empty(&ml))
1814 		return;
1815 
1816 	NET_LOCK();
1817 
1818 #ifdef IPSEC
1819 	/*
1820 	 * IPsec is not ready to run without KERNEL_LOCK().  So all
1821 	 * the traffic on your machine is punished if you have IPsec
1822 	 * enabled.
1823 	 */
1824 	extern int ipsec_in_use;
1825 	if (ipsec_in_use) {
1826 		NET_UNLOCK();
1827 		KERNEL_LOCK();
1828 		NET_LOCK();
1829 		locked = 1;
1830 	}
1831 #endif /* IPSEC */
1832 
1833 	while ((m = ml_dequeue(&ml)) != NULL) {
1834 		ip_output(m, NULL, NULL, 0, NULL, NULL, 0);
1835 	}
1836 	NET_UNLOCK();
1837 
1838 #ifdef IPSEC
1839 	if (locked)
1840 		KERNEL_UNLOCK();
1841 #endif /* IPSEC */
1842 }
1843 
1844 void
1845 ip_send(struct mbuf *m)
1846 {
1847 	mq_enqueue(&ipsend_mq, m);
1848 	task_add(softnettq, &ipsend_task);
1849 }
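
/*
 * A minimal usage sketch for the deferred-output path above: a caller
 * holding a packet mbuf "m" with a complete IPv4 header simply does
 *
 *	ip_send(m);
 *
 * The mbuf is only queued here; the softnet task later dequeues it and
 * performs the real ip_output() call under NET_LOCK() (and, while IPsec
 * is in use, under KERNEL_LOCK() as well).
 */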
1850