xref: /netbsd-src/sys/netinet/ip_encap.c (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
1 /*	$NetBSD: ip_encap.c,v 1.71 2019/05/15 03:33:41 knakahara Exp $	*/
2 /*	$KAME: ip_encap.c,v 1.73 2001/10/02 08:30:58 itojun Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the project nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 /*
33  * My grandfather said that there's a devil inside tunnelling technology...
34  *
35  * We have surprisingly many protocols that want packets with IP protocol
36  * #4 or #41.  Here's a list of protocols that want protocol #41:
37  *	RFC1933 configured tunnel
38  *	RFC1933 automatic tunnel
39  *	RFC2401 IPsec tunnel
40  *	RFC2473 IPv6 generic packet tunnelling
41  *	RFC2529 6over4 tunnel
42  *	RFC3056 6to4 tunnel
43  *	isatap tunnel
44  *	mobile-ip6 (uses RFC2473)
45  * Here's a list of protocols that want protocol #4:
46  *	RFC1853 IPv4-in-IPv4 tunnelling
47  *	RFC2003 IPv4 encapsulation within IPv4
48  *	RFC2344 reverse tunnelling for mobile-ip4
49  *	RFC2401 IPsec tunnel
50  * Well, what can I say.  They each impose a different en/decapsulation
51  * mechanism, so they need separate protocol handlers.  The only one we
52  * can easily determine by protocol # is IPsec, which always has an
53  * AH/ESP/IPComp header right after the outer IP header.
54  *
55  * So, clearly good old protosw does not work for protocols #4 and #41.
56  * The code lets you match a tunnel by its src/dst address pair instead.
57  */
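
/*
 * For example (an illustration only): a gif(4) tunnel to peer A and
 * another to peer B both receive IPv6-over-IPv4 packets carrying
 * protocol #41; only the outer src/dst address pair tells them apart,
 * so dispatch is done on that pair (or on a caller-supplied match
 * function) rather than on the protocol number alone.
 */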
58 /* XXX is M_NETADDR correct? */
59 
60 /*
61  * With USE_RADIX the code will use radix table for tunnel lookup, for
62  * With USE_RADIX the code uses a radix table for tunnel lookup, for tunnels
63  * registered with encap_attach() with an addr/mask pair.  This is faster
64  * on machines with thousands of tunnel registrations (= interfaces).
65  *
66  * The code assumes that the radix table code can handle non-contiguous
67  * netmasks, as the lookup key is a packed (src + dst) sockaddr pair.
 */
68 #define USE_RADIX
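
/*
 * Illustration of the packed lookup key used with USE_RADIX (a rough
 * sketch; struct ip_pack4/ip_pack6 in ip_encap.h are the authoritative
 * definitions):
 *
 *	+--------------+-----------------------+-----------------------+
 *	| len/family   | sockaddr "mine"       | sockaddr "yours"      |
 *	| of whole key | (my/local side)       | (remote side)         |
 *	+--------------+-----------------------+-----------------------+
 *
 * The mask is packed the same way, so viewed over the whole key it is
 * generally not one contiguous run of 1-bits, hence the requirement on
 * the radix code stated above.
 */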
69 
70 #include <sys/cdefs.h>
71 __KERNEL_RCSID(0, "$NetBSD: ip_encap.c,v 1.71 2019/05/15 03:33:41 knakahara Exp $");
72 
73 #ifdef _KERNEL_OPT
74 #include "opt_mrouting.h"
75 #include "opt_inet.h"
76 #include "opt_net_mpsafe.h"
77 #endif
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h> /* for softnet_lock */
83 #include <sys/sockio.h>
84 #include <sys/mbuf.h>
85 #include <sys/errno.h>
86 #include <sys/queue.h>
87 #include <sys/kmem.h>
88 #include <sys/mutex.h>
89 #include <sys/condvar.h>
90 #include <sys/psref.h>
91 #include <sys/pslist.h>
92 
93 #include <net/if.h>
94 
95 #include <netinet/in.h>
96 #include <netinet/in_systm.h>
97 #include <netinet/ip.h>
98 #include <netinet/ip_var.h>
99 #include <netinet/ip_encap.h>
100 #ifdef MROUTING
101 #include <netinet/ip_mroute.h>
102 #endif /* MROUTING */
103 
104 #ifdef INET6
105 #include <netinet/ip6.h>
106 #include <netinet6/ip6_var.h>
107 #include <netinet6/ip6protosw.h> /* for struct ip6ctlparam */
108 #include <netinet6/in6_var.h>
109 #include <netinet6/in6_pcb.h>
110 #include <netinet/icmp6.h>
111 #endif
112 
113 #ifdef NET_MPSAFE
114 #define ENCAP_MPSAFE	1
115 #endif
116 
117 enum direction { INBOUND, OUTBOUND };
118 
119 #ifdef INET
120 static struct encaptab *encap4_lookup(struct mbuf *, int, int, enum direction,
121     struct psref *);
122 #endif
123 #ifdef INET6
124 static struct encaptab *encap6_lookup(struct mbuf *, int, int, enum direction,
125     struct psref *);
126 #endif
127 static int encap_add(struct encaptab *);
128 static int encap_remove(struct encaptab *);
129 static int encap_afcheck(int, const struct sockaddr *, const struct sockaddr *);
130 #ifdef USE_RADIX
131 static struct radix_node_head *encap_rnh(int);
132 static int mask_matchlen(const struct sockaddr *);
133 #else
134 static int mask_match(const struct encaptab *, const struct sockaddr *,
135 		const struct sockaddr *);
136 #endif
137 
138 /*
139  * In encap[46]_lookup(), ep->func can sleep (e.g. rtalloc1) while walking
140  * encap_table, so the walk cannot rely on pserialize_read_enter() alone.
141  */
142 static struct {
143 	struct pslist_head	list;
144 	pserialize_t		psz;
145 	struct psref_class	*elem_class; /* for the element of et_list */
146 } encaptab  __cacheline_aligned = {
147 	.list = PSLIST_INITIALIZER,
148 };
149 #define encap_table encaptab.list
150 
151 static struct {
152 	kmutex_t	lock;
153 	kcondvar_t	cv;
154 	struct lwp	*busy;
155 } encap_whole __cacheline_aligned;
156 
157 #ifdef USE_RADIX
158 struct radix_node_head *encap_head[2];	/* 0 for AF_INET, 1 for AF_INET6 */
159 static bool encap_head_updating = false;
160 #endif
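
/*
 * Concurrency overview (a summary of the mechanisms used below): writers
 * such as encap_attach()/encap_detach() are serialized on encap_whole via
 * encap_lock_enter()/encap_lock_exit(); readers walk encap_table under
 * pserialize_read_enter() and take a psref on each element before any
 * sleepable call; radix tree updates are fenced by encap_head_updating
 * together with pserialize_perform().
 */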
161 
162 static bool encap_initialized = false;
163 /*
164  * Must be called before any encap interface is initialized.
165  */
166 void
167 encapinit(void)
168 {
169 
170 	if (encap_initialized)
171 		return;
172 
173 	encaptab.psz = pserialize_create();
174 	encaptab.elem_class = psref_class_create("encapelem", IPL_SOFTNET);
175 
176 	mutex_init(&encap_whole.lock, MUTEX_DEFAULT, IPL_NONE);
177 	cv_init(&encap_whole.cv, "ip_encap cv");
178 	encap_whole.busy = NULL;
179 
180 	encap_initialized = true;
181 }
182 
183 void
184 encap_init(void)
185 {
186 	static int initialized = 0;
187 
188 	if (initialized)
189 		return;
190 	initialized++;
191 #if 0
192 	/*
193 	 * we cannot use PSLIST_INIT() here, since drivers may want to call
194 	 * encap_attach() on driver attach.  encap_init() will be called
195 	 * on AF_INET{,6} initialization, which happens after driver
196 	 * initialization - using PSLIST_INIT() here would wipe out the
197 	 * registrations already made by drivers.
198 	 */
199 	PSLIST_INIT(&encap_table);
200 #endif
201 
202 #ifdef USE_RADIX
203 	/*
204 	 * initialize the radix lookup tables once the radix subsystem is initialized.
205 	 */
206 	rn_delayedinit((void *)&encap_head[0],
207 	    sizeof(struct sockaddr_pack) << 3);
208 #ifdef INET6
209 	rn_delayedinit((void *)&encap_head[1],
210 	    sizeof(struct sockaddr_pack) << 3);
211 #endif
212 #endif
213 }
214 
215 #ifdef INET
216 static struct encaptab *
217 encap4_lookup(struct mbuf *m, int off, int proto, enum direction dir,
218     struct psref *match_psref)
219 {
220 	struct ip *ip;
221 	struct ip_pack4 pack;
222 	struct encaptab *ep, *match;
223 	int prio, matchprio;
224 	int s;
225 #ifdef USE_RADIX
226 	struct radix_node_head *rnh = encap_rnh(AF_INET);
227 	struct radix_node *rn;
228 #endif
229 
230 	KASSERT(m->m_len >= sizeof(*ip));
231 
232 	ip = mtod(m, struct ip *);
233 
234 	memset(&pack, 0, sizeof(pack));
235 	pack.p.sp_len = sizeof(pack);
236 	pack.mine.sin_family = pack.yours.sin_family = AF_INET;
237 	pack.mine.sin_len = pack.yours.sin_len = sizeof(struct sockaddr_in);
238 	if (dir == INBOUND) {
239 		pack.mine.sin_addr = ip->ip_dst;
240 		pack.yours.sin_addr = ip->ip_src;
241 	} else {
242 		pack.mine.sin_addr = ip->ip_src;
243 		pack.yours.sin_addr = ip->ip_dst;
244 	}
245 
246 	match = NULL;
247 	matchprio = 0;
248 
249 	s = pserialize_read_enter();
250 #ifdef USE_RADIX
251 	if (encap_head_updating) {
252 		/*
253 		 * Update in progress. Do nothing.
254 		 */
255 		pserialize_read_exit(s);
256 		return NULL;
257 	}
258 
259 	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
260 	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
261 		struct encaptab *encapp = (struct encaptab *)rn;
262 
263 		psref_acquire(match_psref, &encapp->psref,
264 		    encaptab.elem_class);
265 		match = encapp;
266 		matchprio = mask_matchlen(match->srcmask) +
267 		    mask_matchlen(match->dstmask);
268 	}
269 #endif
270 	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
271 		struct psref elem_psref;
272 
273 		if (ep->af != AF_INET)
274 			continue;
275 		if (ep->proto >= 0 && ep->proto != proto)
276 			continue;
277 
278 		psref_acquire(&elem_psref, &ep->psref,
279 		    encaptab.elem_class);
280 		if (ep->func) {
281 			pserialize_read_exit(s);
282 			/* ep->func is sleepable. e.g. rtalloc1 */
283 			prio = (*ep->func)(m, off, proto, ep->arg);
284 			s = pserialize_read_enter();
285 		} else {
286 #ifdef USE_RADIX
287 			psref_release(&elem_psref, &ep->psref,
288 			    encaptab.elem_class);
289 			continue;
290 #else
291 			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
292 			    (struct sockaddr *)&pack.yours);
293 #endif
294 		}
295 
296 		/*
297 		 * We prioritize the matches by using bit length of the
298 		 * matches.  mask_match() and user-supplied matching functions
299 		 * should return the bit length of the matches (for example,
300 		 * if both src/dst are matched for IPv4, 64 should be returned).
301 		 * 0 or negative return value means "it did not match".
302 		 *
303 		 * The question is, since we have two "mask" portions, we
304 		 * cannot really define a total order between entries.
305 		 * For example, which of these should be preferred?
306 		 * mask_match() returns 48 (32 + 16) for both of them.
307 		 *	src=3ffe::/16, dst=3ffe:501::/32
308 		 *	src=3ffe:501::/32, dst=3ffe::/16
309 		 *
310 		 * We need to loop through all the possible candidates
311 		 * to get the best match - the search takes O(n) for
312 		 * n attachments (i.e. interfaces).
313 		 *
314 		 * For radix-based lookup, I guess source takes precedence.
315 		 * See rn_{refines,lexobetter} for the correct answer.
316 		 */
317 		if (prio <= 0) {
318 			psref_release(&elem_psref, &ep->psref,
319 			    encaptab.elem_class);
320 			continue;
321 		}
322 		if (prio > matchprio) {
323 			/* release last matched ep */
324 			if (match != NULL)
325 				psref_release(match_psref, &match->psref,
326 				    encaptab.elem_class);
327 
328 			psref_copy(match_psref, &elem_psref,
329 			    encaptab.elem_class);
330 			matchprio = prio;
331 			match = ep;
332 		}
333 		KASSERTMSG((match == NULL) || psref_held(&match->psref,
334 			encaptab.elem_class),
335 		    "current match = %p, but its psref is not held", match);
336 
337 		psref_release(&elem_psref, &ep->psref,
338 		    encaptab.elem_class);
339 	}
340 	pserialize_read_exit(s);
341 
342 	return match;
343 }
344 
345 void
346 encap4_input(struct mbuf *m, int off, int proto)
347 {
348 	const struct encapsw *esw;
349 	struct encaptab *match;
350 	struct psref match_psref;
351 
352 	match = encap4_lookup(m, off, proto, INBOUND, &match_psref);
353 	if (match) {
354 		/* found a match, "match" has the best one */
355 		esw = match->esw;
356 		if (esw && esw->encapsw4.pr_input) {
357 			(*esw->encapsw4.pr_input)(m, off, proto, match->arg);
358 			psref_release(&match_psref, &match->psref,
359 			    encaptab.elem_class);
360 		} else {
361 			psref_release(&match_psref, &match->psref,
362 			    encaptab.elem_class);
363 			m_freem(m);
364 		}
365 		return;
366 	}
367 
368 	/* last resort: inject to raw socket */
369 	SOFTNET_LOCK_IF_NET_MPSAFE();
370 	rip_input(m, off, proto);
371 	SOFTNET_UNLOCK_IF_NET_MPSAFE();
372 }
373 #endif
374 
375 #ifdef INET6
376 static struct encaptab *
377 encap6_lookup(struct mbuf *m, int off, int proto, enum direction dir,
378     struct psref *match_psref)
379 {
380 	struct ip6_hdr *ip6;
381 	struct ip_pack6 pack;
382 	int prio, matchprio;
383 	int s;
384 	struct encaptab *ep, *match;
385 #ifdef USE_RADIX
386 	struct radix_node_head *rnh = encap_rnh(AF_INET6);
387 	struct radix_node *rn;
388 #endif
389 
390 	KASSERT(m->m_len >= sizeof(*ip6));
391 
392 	ip6 = mtod(m, struct ip6_hdr *);
393 
394 	memset(&pack, 0, sizeof(pack));
395 	pack.p.sp_len = sizeof(pack);
396 	pack.mine.sin6_family = pack.yours.sin6_family = AF_INET6;
397 	pack.mine.sin6_len = pack.yours.sin6_len = sizeof(struct sockaddr_in6);
398 	if (dir == INBOUND) {
399 		pack.mine.sin6_addr = ip6->ip6_dst;
400 		pack.yours.sin6_addr = ip6->ip6_src;
401 	} else {
402 		pack.mine.sin6_addr = ip6->ip6_src;
403 		pack.yours.sin6_addr = ip6->ip6_dst;
404 	}
405 
406 	match = NULL;
407 	matchprio = 0;
408 
409 	s = pserialize_read_enter();
410 #ifdef USE_RADIX
411 	if (encap_head_updating) {
412 		/*
413 		 * Update in progress. Do nothing.
414 		 */
415 		pserialize_read_exit(s);
416 		return NULL;
417 	}
418 
419 	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
420 	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
421 		struct encaptab *encapp = (struct encaptab *)rn;
422 
423 		psref_acquire(match_psref, &encapp->psref,
424 		    encaptab.elem_class);
425 		match = encapp;
426 		matchprio = mask_matchlen(match->srcmask) +
427 		    mask_matchlen(match->dstmask);
428 	}
429 #endif
430 	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
431 		struct psref elem_psref;
432 
433 		if (ep->af != AF_INET6)
434 			continue;
435 		if (ep->proto >= 0 && ep->proto != proto)
436 			continue;
437 
438 		psref_acquire(&elem_psref, &ep->psref,
439 		    encaptab.elem_class);
440 
441 		if (ep->func) {
442 			pserialize_read_exit(s);
443 			/* ep->func is sleepable. e.g. rtalloc1 */
444 			prio = (*ep->func)(m, off, proto, ep->arg);
445 			s = pserialize_read_enter();
446 		} else {
447 #ifdef USE_RADIX
448 			psref_release(&elem_psref, &ep->psref,
449 			    encaptab.elem_class);
450 			continue;
451 #else
452 			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
453 			    (struct sockaddr *)&pack.yours);
454 #endif
455 		}
456 
457 		/* see encap4_lookup() for issues here */
458 		if (prio <= 0) {
459 			psref_release(&elem_psref, &ep->psref,
460 			    encaptab.elem_class);
461 			continue;
462 		}
463 		if (prio > matchprio) {
464 			/* release last matched ep */
465 			if (match != NULL)
466 				psref_release(match_psref, &match->psref,
467 				    encaptab.elem_class);
468 
469 			psref_copy(match_psref, &elem_psref,
470 			    encaptab.elem_class);
471 			matchprio = prio;
472 			match = ep;
473 		}
474 		KASSERTMSG((match == NULL) || psref_held(&match->psref,
475 			encaptab.elem_class),
476 		    "current match = %p, but its psref is not held", match);
477 
478 		psref_release(&elem_psref, &ep->psref,
479 		    encaptab.elem_class);
480 	}
481 	pserialize_read_exit(s);
482 
483 	return match;
484 }
485 
486 int
487 encap6_input(struct mbuf **mp, int *offp, int proto)
488 {
489 	struct mbuf *m = *mp;
490 	const struct encapsw *esw;
491 	struct encaptab *match;
492 	struct psref match_psref;
493 	int rv;
494 
495 	match = encap6_lookup(m, *offp, proto, INBOUND, &match_psref);
496 
497 	if (match) {
498 		/* found a match */
499 		esw = match->esw;
500 		if (esw && esw->encapsw6.pr_input) {
501 			int ret;
502 			ret = (*esw->encapsw6.pr_input)(mp, offp, proto,
503 			    match->arg);
504 			psref_release(&match_psref, &match->psref,
505 			    encaptab.elem_class);
506 			return ret;
507 		} else {
508 			psref_release(&match_psref, &match->psref,
509 			    encaptab.elem_class);
510 			m_freem(m);
511 			return IPPROTO_DONE;
512 		}
513 	}
514 
515 	/* last resort: inject to raw socket */
516 	SOFTNET_LOCK_IF_NET_MPSAFE();
517 	rv = rip6_input(mp, offp, proto);
518 	SOFTNET_UNLOCK_IF_NET_MPSAFE();
519 	return rv;
520 }
521 #endif
522 
523 /*
524  * XXX
525  * The encaptab list and the rnh radix tree must be manipulated atomically.
526  */
527 static int
528 encap_add(struct encaptab *ep)
529 {
530 #ifdef USE_RADIX
531 	struct radix_node_head *rnh = encap_rnh(ep->af);
532 #endif
533 
534 	KASSERT(encap_lock_held());
535 
536 #ifdef USE_RADIX
537 	if (!ep->func && rnh) {
538 		/* Disable access to the radix tree for reader. */
539 		encap_head_updating = true;
540 		/* Wait for all readers to drain. */
541 		pserialize_perform(encaptab.psz);
542 
543 		if (!rnh->rnh_addaddr((void *)ep->addrpack,
544 		    (void *)ep->maskpack, rnh, ep->nodes)) {
545 			encap_head_updating = false;
546 			return EEXIST;
547 		}
548 
549 		/*
550 		 * An ep without ep->func is found only via the radix tree;
551 		 * the encaptab list walk in encap[46]_lookup skips it.  So
552 		 * encap_add() does not need to care whether the ep has
553 		 * been added to the encaptab list yet, and we can
554 		 * re-enable access to the radix tree now.
555 		 */
556 		encap_head_updating = false;
557 	}
558 #endif
559 	PSLIST_WRITER_INSERT_HEAD(&encap_table, ep, chain);
560 
561 	return 0;
562 }
563 
564 /*
565  * XXX
566  * The encaptab list and the rnh radix tree must be manipulated atomically.
567  */
568 static int
569 encap_remove(struct encaptab *ep)
570 {
571 #ifdef USE_RADIX
572 	struct radix_node_head *rnh = encap_rnh(ep->af);
573 #endif
574 	int error = 0;
575 
576 	KASSERT(encap_lock_held());
577 
578 #ifdef USE_RADIX
579 	if (!ep->func && rnh) {
580 		/* Disable access to the radix tree for reader. */
581 		encap_head_updating = true;
582 		/* Wait for all readers to drain. */
583 		pserialize_perform(encaptab.psz);
584 
585 		if (!rnh->rnh_deladdr((void *)ep->addrpack,
586 		    (void *)ep->maskpack, rnh))
587 			error = ESRCH;
588 
589 		/*
590 		 * An ep without ep->func is found only via the radix tree;
591 		 * the encaptab list walk in encap[46]_lookup skips it.  So
592 		 * encap_remove() does not need to care whether the ep has
593 		 * been removed from the encaptab list yet, and we can
594 		 * re-enable access to the radix tree now.
595 		 */
596 		encap_head_updating = false;
597 	}
598 #endif
599 	PSLIST_WRITER_REMOVE(ep, chain);
600 
601 	return error;
602 }
603 
604 static int
605 encap_afcheck(int af, const struct sockaddr *sp, const struct sockaddr *dp)
606 {
607 	if (sp && dp) {
608 		if (sp->sa_len != dp->sa_len)
609 			return EINVAL;
610 		if (af != sp->sa_family || af != dp->sa_family)
611 			return EINVAL;
612 	} else if (!sp && !dp)
613 		;
614 	else
615 		return EINVAL;
616 
617 	switch (af) {
618 	case AF_INET:
619 		if (sp && sp->sa_len != sizeof(struct sockaddr_in))
620 			return EINVAL;
621 		if (dp && dp->sa_len != sizeof(struct sockaddr_in))
622 			return EINVAL;
623 		break;
624 #ifdef INET6
625 	case AF_INET6:
626 		if (sp && sp->sa_len != sizeof(struct sockaddr_in6))
627 			return EINVAL;
628 		if (dp && dp->sa_len != sizeof(struct sockaddr_in6))
629 			return EINVAL;
630 		break;
631 #endif
632 	default:
633 		return EAFNOSUPPORT;
634 	}
635 
636 	return 0;
637 }
638 
639 /*
640  * sp (src ptr) is always my side, and dp (dst ptr) is always remote side.
641  * The length of the masks (sm and dm) is assumed to be the same as sp/dp.
642  * The return value is needed later as the cookie for encap_detach().
643  */
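
/*
 * Usage sketch (illustrative only; "sc", "local", "remote", "mask" and
 * "sc_encapsw" are hypothetical, and real callers are tunnel drivers such
 * as gif(4)).  encap_add()/encap_remove() assert encap_lock_held(), so
 * attach and detach are done under the encap lock:
 *
 *	struct sockaddr_in local, remote, mask;
 *
 *	(fill in sin_len/sin_family/sin_addr of local and remote;
 *	 set mask.sin_addr.s_addr = 0xffffffff for host masks)
 *
 *	encap_lock_enter();
 *	cookie = encap_attach(AF_INET, IPPROTO_IPV4,
 *	    (struct sockaddr *)&local, (struct sockaddr *)&mask,
 *	    (struct sockaddr *)&remote, (struct sockaddr *)&mask,
 *	    &sc_encapsw, sc);
 *	encap_lock_exit();
 *	...
 *	encap_lock_enter();
 *	encap_detach(cookie);
 *	encap_lock_exit();
 */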
644 const struct encaptab *
645 encap_attach(int af, int proto,
646     const struct sockaddr *sp, const struct sockaddr *sm,
647     const struct sockaddr *dp, const struct sockaddr *dm,
648     const struct encapsw *esw, void *arg)
649 {
650 	struct encaptab *ep;
651 	int error;
652 	int pss;
653 	size_t l;
654 	struct ip_pack4 *pack4;
655 #ifdef INET6
656 	struct ip_pack6 *pack6;
657 #endif
658 #ifndef ENCAP_MPSAFE
659 	int s;
660 
661 	s = splsoftnet();
662 #endif
663 	/* sanity check on args */
664 	error = encap_afcheck(af, sp, dp);
665 	if (error)
666 		goto fail;
667 
668 	/* check if anyone has already attached with exactly the same config */
669 	pss = pserialize_read_enter();
670 	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
671 		if (ep->af != af)
672 			continue;
673 		if (ep->proto != proto)
674 			continue;
675 		if (ep->func)
676 			continue;
677 
678 		KASSERT(ep->src != NULL);
679 		KASSERT(ep->dst != NULL);
680 		KASSERT(ep->srcmask != NULL);
681 		KASSERT(ep->dstmask != NULL);
682 
683 		if (ep->src->sa_len != sp->sa_len ||
684 		    memcmp(ep->src, sp, sp->sa_len) != 0 ||
685 		    memcmp(ep->srcmask, sm, sp->sa_len) != 0)
686 			continue;
687 		if (ep->dst->sa_len != dp->sa_len ||
688 		    memcmp(ep->dst, dp, dp->sa_len) != 0 ||
689 		    memcmp(ep->dstmask, dm, dp->sa_len) != 0)
690 			continue;
691 
692 		error = EEXIST;
693 		pserialize_read_exit(pss);
694 		goto fail;
695 	}
696 	pserialize_read_exit(pss);
697 
698 	switch (af) {
699 	case AF_INET:
700 		l = sizeof(*pack4);
701 		break;
702 #ifdef INET6
703 	case AF_INET6:
704 		l = sizeof(*pack6);
705 		break;
706 #endif
707 	default:
708 		goto fail;
709 	}
710 
711 	/* M_NETADDR ok? */
712 	ep = kmem_zalloc(sizeof(*ep), KM_NOSLEEP);
713 	if (ep == NULL) {
714 		error = ENOBUFS;
715 		goto fail;
716 	}
717 	ep->addrpack = kmem_zalloc(l, KM_NOSLEEP);
718 	if (ep->addrpack == NULL) {
719 		error = ENOBUFS;
720 		goto gc;
721 	}
722 	ep->maskpack = kmem_zalloc(l, KM_NOSLEEP);
723 	if (ep->maskpack == NULL) {
724 		error = ENOBUFS;
725 		goto gc;
726 	}
727 
728 	ep->af = af;
729 	ep->proto = proto;
730 	ep->addrpack->sa_len = l & 0xff;
731 	ep->maskpack->sa_len = l & 0xff;
732 	switch (af) {
733 	case AF_INET:
734 		pack4 = (struct ip_pack4 *)ep->addrpack;
735 		ep->src = (struct sockaddr *)&pack4->mine;
736 		ep->dst = (struct sockaddr *)&pack4->yours;
737 		pack4 = (struct ip_pack4 *)ep->maskpack;
738 		ep->srcmask = (struct sockaddr *)&pack4->mine;
739 		ep->dstmask = (struct sockaddr *)&pack4->yours;
740 		break;
741 #ifdef INET6
742 	case AF_INET6:
743 		pack6 = (struct ip_pack6 *)ep->addrpack;
744 		ep->src = (struct sockaddr *)&pack6->mine;
745 		ep->dst = (struct sockaddr *)&pack6->yours;
746 		pack6 = (struct ip_pack6 *)ep->maskpack;
747 		ep->srcmask = (struct sockaddr *)&pack6->mine;
748 		ep->dstmask = (struct sockaddr *)&pack6->yours;
749 		break;
750 #endif
751 	}
752 
753 	memcpy(ep->src, sp, sp->sa_len);
754 	memcpy(ep->srcmask, sm, sp->sa_len);
755 	memcpy(ep->dst, dp, dp->sa_len);
756 	memcpy(ep->dstmask, dm, dp->sa_len);
757 	ep->esw = esw;
758 	ep->arg = arg;
759 	psref_target_init(&ep->psref, encaptab.elem_class);
760 
761 	error = encap_add(ep);
762 	if (error)
763 		goto gc;
764 
765 	error = 0;
766 #ifndef ENCAP_MPSAFE
767 	splx(s);
768 #endif
769 	return ep;
770 
771 gc:
772 	if (ep->addrpack)
773 		kmem_free(ep->addrpack, l);
774 	if (ep->maskpack)
775 		kmem_free(ep->maskpack, l);
776 	if (ep)
777 		kmem_free(ep, sizeof(*ep));
778 fail:
779 #ifndef ENCAP_MPSAFE
780 	splx(s);
781 #endif
782 	return NULL;
783 }
784 
785 const struct encaptab *
786 encap_attach_func(int af, int proto,
787     int (*func)(struct mbuf *, int, int, void *),
788     const struct encapsw *esw, void *arg)
789 {
790 	struct encaptab *ep;
791 	int error;
792 #ifndef ENCAP_MPSAFE
793 	int s;
794 
795 	s = splsoftnet();
796 #endif
797 	/* sanity check on args */
798 	if (!func) {
799 		error = EINVAL;
800 		goto fail;
801 	}
802 
803 	error = encap_afcheck(af, NULL, NULL);
804 	if (error)
805 		goto fail;
806 
807 	ep = kmem_alloc(sizeof(*ep), KM_NOSLEEP);	/*XXX*/
808 	if (ep == NULL) {
809 		error = ENOBUFS;
810 		goto fail;
811 	}
812 	memset(ep, 0, sizeof(*ep));
813 
814 	ep->af = af;
815 	ep->proto = proto;
816 	ep->func = func;
817 	ep->esw = esw;
818 	ep->arg = arg;
819 	psref_target_init(&ep->psref, encaptab.elem_class);
820 
821 	error = encap_add(ep);
822 	if (error)
823 		goto gc;
824 
825 	error = 0;
826 #ifndef ENCAP_MPSAFE
827 	splx(s);
828 #endif
829 	return ep;
830 
831 gc:
832 	kmem_free(ep, sizeof(*ep));
833 fail:
834 #ifndef ENCAP_MPSAFE
835 	splx(s);
836 #endif
837 	return NULL;
838 }
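
/*
 * Sketch of a caller-supplied match function for encap_attach_func()
 * (illustrative only: "mytunnel_softc", sc_local and sc_remote are
 * hypothetical).  The function sees the mbuf with the outer header at
 * the start and returns the matched bit length, or 0 for no match:
 *
 *	static int
 *	mytunnel_match(struct mbuf *m, int off, int proto, void *arg)
 *	{
 *		struct mytunnel_softc *sc = arg;
 *		struct ip ip;
 *
 *		if (proto != IPPROTO_IPV4)
 *			return 0;
 *		m_copydata(m, 0, sizeof(ip), &ip);
 *		if (in_hosteq(ip.ip_dst, sc->sc_local) &&
 *		    in_hosteq(ip.ip_src, sc->sc_remote))
 *			return 32 + 32;
 *		return 0;
 *	}
 */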
839 
840 /* XXX encap4_ctlinput() is necessary if we set DF=1 on outer IPv4 header */
841 
842 #ifdef INET6
843 void *
844 encap6_ctlinput(int cmd, const struct sockaddr *sa, void *d0)
845 {
846 	void *d = d0;
847 	struct ip6_hdr *ip6;
848 	struct mbuf *m;
849 	int off;
850 	struct ip6ctlparam *ip6cp = NULL;
851 	int nxt;
852 	int s;
853 	struct encaptab *ep;
854 	const struct encapsw *esw;
855 
856 	if (sa->sa_family != AF_INET6 ||
857 	    sa->sa_len != sizeof(struct sockaddr_in6))
858 		return NULL;
859 
860 	if ((unsigned)cmd >= PRC_NCMDS)
861 		return NULL;
862 	if (cmd == PRC_HOSTDEAD)
863 		d = NULL;
864 	else if (cmd == PRC_MSGSIZE)
865 		; /* special code is present, see below */
866 	else if (inet6ctlerrmap[cmd] == 0)
867 		return NULL;
868 
869 	/* if the parameter is from icmp6, decode it. */
870 	if (d != NULL) {
871 		ip6cp = (struct ip6ctlparam *)d;
872 		m = ip6cp->ip6c_m;
873 		ip6 = ip6cp->ip6c_ip6;
874 		off = ip6cp->ip6c_off;
875 		nxt = ip6cp->ip6c_nxt;
876 
877 		if (ip6 && cmd == PRC_MSGSIZE) {
878 			int valid = 0;
879 			struct encaptab *match;
880 			struct psref elem_psref;
881 
882 			/*
883 			 * Check to see if we have a valid encap configuration.
884 			 */
885 			match = encap6_lookup(m, off, nxt, OUTBOUND,
886 			    &elem_psref);
887 			if (match) {
888 				valid++;
889 				psref_release(&elem_psref, &match->psref,
890 				    encaptab.elem_class);
891 			}
892 			/*
893 			 * Depending on the value of "valid" and the routing
894 			 * table size (mtudisc_{hi,lo}wat), we will:
895 			 * - recalculate the new MTU and create the
896 			 *   corresponding routing entry, or
897 			 * - ignore the MTU change notification.
898 			 */
899 			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
900 		}
901 	} else {
902 		m = NULL;
903 		ip6 = NULL;
904 		nxt = -1;
905 	}
906 
907 	/* inform all listeners */
908 
909 	s = pserialize_read_enter();
910 	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
911 		struct psref elem_psref;
912 
913 		if (ep->af != AF_INET6)
914 			continue;
915 		if (ep->proto >= 0 && ep->proto != nxt)
916 			continue;
917 
918 		/* should optimize by looking at address pairs */
919 
920 		/* XXX need to pass ep->arg or ep itself to listeners */
921 		psref_acquire(&elem_psref, &ep->psref,
922 		    encaptab.elem_class);
923 		esw = ep->esw;
924 		if (esw && esw->encapsw6.pr_ctlinput) {
925 			pserialize_read_exit(s);
926 			/* pr_ctlinput is sleepable. e.g. rtcache_free */
927 			(*esw->encapsw6.pr_ctlinput)(cmd, sa, d, ep->arg);
928 			s = pserialize_read_enter();
929 		}
930 		psref_release(&elem_psref, &ep->psref,
931 		    encaptab.elem_class);
932 	}
933 	pserialize_read_exit(s);
934 
935 	rip6_ctlinput(cmd, sa, d0);
936 	return NULL;
937 }
938 #endif
939 
940 int
941 encap_detach(const struct encaptab *cookie)
942 {
943 	const struct encaptab *ep = cookie;
944 	struct encaptab *p;
945 	int error;
946 
947 	KASSERT(encap_lock_held());
948 
949 	PSLIST_WRITER_FOREACH(p, &encap_table, struct encaptab, chain) {
950 		if (p == ep) {
951 			error = encap_remove(p);
952 			if (error)
953 				return error;
954 			else
955 				break;
956 		}
957 	}
958 	if (p == NULL)
959 		return ENOENT;
960 
961 	pserialize_perform(encaptab.psz);
962 	psref_target_destroy(&p->psref,
963 	    encaptab.elem_class);
964 	if (!ep->func) {
965 		kmem_free(p->addrpack, ep->addrpack->sa_len);
966 		kmem_free(p->maskpack, ep->maskpack->sa_len);
967 	}
968 	kmem_free(p, sizeof(*p));
969 
970 	return 0;
971 }
972 
973 #ifdef USE_RADIX
974 static struct radix_node_head *
975 encap_rnh(int af)
976 {
977 
978 	switch (af) {
979 	case AF_INET:
980 		return encap_head[0];
981 #ifdef INET6
982 	case AF_INET6:
983 		return encap_head[1];
984 #endif
985 	default:
986 		return NULL;
987 	}
988 }
989 
990 static int
991 mask_matchlen(const struct sockaddr *sa)
992 {
993 	const char *p, *ep;
994 	int l;
995 
996 	p = (const char *)sa;
997 	ep = p + sa->sa_len;
998 	p += 2;	/* sa_len + sa_family */
999 
1000 	l = 0;
1001 	while (p < ep) {
1002 		l += (*p ? 8 : 0);	/* estimate */
1003 		p++;
1004 	}
1005 	return l;
1006 }
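
/*
 * Worked example of the estimate above: for an IPv4 mask of
 * 255.255.255.240 (/28), all four sin_addr bytes are nonzero, so
 * mask_matchlen() returns 8 * 4 = 32 rather than 28.  It is a per-byte
 * estimate, as noted in the code, not an exact prefix length.
 */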
1007 #endif
1008 
1009 #ifndef USE_RADIX
1010 static int
1011 mask_match(const struct encaptab *ep,
1012 	   const struct sockaddr *sp,
1013 	   const struct sockaddr *dp)
1014 {
1015 	struct sockaddr_storage s;
1016 	struct sockaddr_storage d;
1017 	int i;
1018 	const u_int8_t *p, *q;
1019 	u_int8_t *r;
1020 	int matchlen;
1021 
1022 	KASSERTMSG(ep->func == NULL, "wrong encaptab passed to mask_match");
1023 
1024 	if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d))
1025 		return 0;
1026 	if (sp->sa_family != ep->af || dp->sa_family != ep->af)
1027 		return 0;
1028 	if (sp->sa_len != ep->src->sa_len || dp->sa_len != ep->dst->sa_len)
1029 		return 0;
1030 
1031 	matchlen = 0;
1032 
1033 	p = (const u_int8_t *)sp;
1034 	q = (const u_int8_t *)ep->srcmask;
1035 	r = (u_int8_t *)&s;
1036 	for (i = 0 ; i < sp->sa_len; i++) {
1037 		r[i] = p[i] & q[i];
1038 		/* XXX estimate */
1039 		matchlen += (q[i] ? 8 : 0);
1040 	}
1041 
1042 	p = (const u_int8_t *)dp;
1043 	q = (const u_int8_t *)ep->dstmask;
1044 	r = (u_int8_t *)&d;
1045 	for (i = 0 ; i < dp->sa_len; i++) {
1046 		r[i] = p[i] & q[i];
1047 		/* XXX rough estimate */
1048 		matchlen += (q[i] ? 8 : 0);
1049 	}
1050 
1051 	/* need to overwrite len/family portion as we don't compare them */
1052 	s.ss_len = sp->sa_len;
1053 	s.ss_family = sp->sa_family;
1054 	d.ss_len = dp->sa_len;
1055 	d.ss_family = dp->sa_family;
1056 
1057 	if (memcmp(&s, ep->src, ep->src->sa_len) == 0 &&
1058 	    memcmp(&d, ep->dst, ep->dst->sa_len) == 0) {
1059 		return matchlen;
1060 	} else
1061 		return 0;
1062 }
1063 #endif
1064 
1065 int
1066 encap_lock_enter(void)
1067 {
1068 	int error;
1069 
1070 	mutex_enter(&encap_whole.lock);
1071 	while (encap_whole.busy != NULL) {
1072 		error = cv_wait_sig(&encap_whole.cv, &encap_whole.lock);
1073 		if (error) {
1074 			mutex_exit(&encap_whole.lock);
1075 			return error;
1076 		}
1077 	}
1078 	KASSERT(encap_whole.busy == NULL);
1079 	encap_whole.busy = curlwp;
1080 	mutex_exit(&encap_whole.lock);
1081 
1082 	return 0;
1083 }
1084 
1085 void
1086 encap_lock_exit(void)
1087 {
1088 
1089 	mutex_enter(&encap_whole.lock);
1090 	KASSERT(encap_whole.busy == curlwp);
1091 	encap_whole.busy = NULL;
1092 	cv_broadcast(&encap_whole.cv);
1093 	mutex_exit(&encap_whole.lock);
1094 }
1095 
1096 bool
1097 encap_lock_held(void)
1098 {
1099 
1100 	return (encap_whole.busy == curlwp);
1101 }
1102