1 /*	$NetBSD: ip_encap.c,v 1.69 2018/06/21 10:37:50 knakahara Exp $	*/
2 /*	$KAME: ip_encap.c,v 1.73 2001/10/02 08:30:58 itojun Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the project nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 /*
33  * My grandfather said that there's a devil inside tunnelling technology...
34  *
35  * We have surprisingly many protocols that want packets with IP protocol
36  * #4 or #41.  Here's a list of protocols that want protocol #41:
37  *	RFC1933 configured tunnel
38  *	RFC1933 automatic tunnel
39  *	RFC2401 IPsec tunnel
40  *	RFC2473 IPv6 generic packet tunnelling
41  *	RFC2529 6over4 tunnel
42  *	RFC3056 6to4 tunnel
43  *	isatap tunnel
44  *	mobile-ip6 (uses RFC2473)
45  * Here's a list of protocols that want protocol #4:
46  *	RFC1853 IPv4-in-IPv4 tunnelling
47  *	RFC2003 IPv4 encapsulation within IPv4
48  *	RFC2344 reverse tunnelling for mobile-ip4
49  *	RFC2401 IPsec tunnel
50  * Well, what can I say.  They each impose a different en/decapsulation
51  * mechanism, so they need separate protocol handlers.  The only one we
52  * can easily identify by protocol # is IPsec, which always has an
53  * AH/ESP/IPComp header right after the outer IP header.
54  *
55  * So, clearly, good old protosw does not work for protocols #4 and #41.
56  * The code below lets you match a protocol via a src/dst address pair.
57  */
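/*
 * For example (illustrative addresses only): an IPv6-over-IPv4 tunnel
 * between local 192.0.2.1 and remote 198.51.100.1 registers itself for
 * (af=AF_INET, proto=41, src=192.0.2.1, dst=198.51.100.1).  An inbound
 * IPv4 packet carrying protocol #41 with dst=192.0.2.1 and src=198.51.100.1
 * is then steered to that registration's input routine; a packet that
 * matches no registration falls through to the raw socket input as a
 * last resort.
 */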
58 /* XXX is M_NETADDR correct? */
59 
60 /*
61  * With USE_RADIX the code uses a radix table for tunnel lookup, for tunnels
62  * registered with encap_attach() with an addr/mask pair.  This is faster on
63  * machines with thousands of tunnel registrations (= interfaces).
64  *
65  * The code assumes the radix table code can handle non-contiguous netmasks,
66  * as the lookup key it passes is a packed (src + dst) sockaddr pair.
67  */
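/*
 * Conceptual sketch of the radix lookup key (the authoritative struct
 * ip_pack{4,6} definitions live in netinet/ip_encap.h):
 *
 *	struct ip_pack4 {
 *		struct sockaddr_pack	p;	(sp_len = total length)
 *		struct sockaddr_in	mine;	(local, "my side" address)
 *		struct sockaddr_in	yours;	(remote address)
 *	};
 *
 * Both the address pack and the mask pack use this shape, so a single
 * radix lookup matches the outer header's src and dst at once.
 */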
68 #define USE_RADIX
69 
70 #include <sys/cdefs.h>
71 __KERNEL_RCSID(0, "$NetBSD: ip_encap.c,v 1.69 2018/06/21 10:37:50 knakahara Exp $");
72 
73 #ifdef _KERNEL_OPT
74 #include "opt_mrouting.h"
75 #include "opt_inet.h"
76 #include "opt_net_mpsafe.h"
77 #endif
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/socket.h>
82 #include <sys/sockio.h>
83 #include <sys/mbuf.h>
84 #include <sys/errno.h>
85 #include <sys/queue.h>
86 #include <sys/kmem.h>
87 #include <sys/mutex.h>
88 #include <sys/condvar.h>
89 #include <sys/psref.h>
90 #include <sys/pslist.h>
91 
92 #include <net/if.h>
93 
94 #include <netinet/in.h>
95 #include <netinet/in_systm.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip_var.h>
98 #include <netinet/ip_encap.h>
99 #ifdef MROUTING
100 #include <netinet/ip_mroute.h>
101 #endif /* MROUTING */
102 
103 #ifdef INET6
104 #include <netinet/ip6.h>
105 #include <netinet6/ip6_var.h>
106 #include <netinet6/ip6protosw.h> /* for struct ip6ctlparam */
107 #include <netinet6/in6_var.h>
108 #include <netinet6/in6_pcb.h>
109 #include <netinet/icmp6.h>
110 #endif
111 
112 #ifdef NET_MPSAFE
113 #define ENCAP_MPSAFE	1
114 #endif
115 
116 enum direction { INBOUND, OUTBOUND };
117 
118 #ifdef INET
119 static struct encaptab *encap4_lookup(struct mbuf *, int, int, enum direction,
120     struct psref *);
121 #endif
122 #ifdef INET6
123 static struct encaptab *encap6_lookup(struct mbuf *, int, int, enum direction,
124     struct psref *);
125 #endif
126 static int encap_add(struct encaptab *);
127 static int encap_remove(struct encaptab *);
128 static int encap_afcheck(int, const struct sockaddr *, const struct sockaddr *);
129 #ifdef USE_RADIX
130 static struct radix_node_head *encap_rnh(int);
131 static int mask_matchlen(const struct sockaddr *);
132 #else
133 static int mask_match(const struct encaptab *, const struct sockaddr *,
134 		const struct sockaddr *);
135 #endif
136 
137 /*
138  * In encap[46]_lookup(), ep->func can sleep (e.g. rtalloc1) while walking
139  * encap_table, so pserialize_read_enter() alone cannot protect the walk.
140  */
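/*
 * Reader-side pattern used by encap[46]_lookup() (a simplified sketch of
 * the code below, not a separate API):
 *
 *	s = pserialize_read_enter();
 *	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
 *		psref_acquire(&elem_psref, &ep->psref, encaptab.elem_class);
 *		pserialize_read_exit(s);
 *		prio = (*ep->func)(m, off, proto, ep->arg);	<- may sleep
 *		s = pserialize_read_enter();
 *		psref_release(&elem_psref, &ep->psref, encaptab.elem_class);
 *	}
 *	pserialize_read_exit(s);
 *
 * The psref keeps the element alive while the pserialize read section is
 * dropped around the sleepable callback.
 */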
141 static struct {
142 	struct pslist_head	list;
143 	pserialize_t		psz;
144 	struct psref_class	*elem_class; /* for the elements of encap_table */
145 } encaptab  __cacheline_aligned = {
146 	.list = PSLIST_INITIALIZER,
147 };
148 #define encap_table encaptab.list
149 
150 static struct {
151 	kmutex_t	lock;
152 	kcondvar_t	cv;
153 	struct lwp	*busy;
154 } encap_whole __cacheline_aligned;
155 
156 #ifdef USE_RADIX
157 struct radix_node_head *encap_head[2];	/* 0 for AF_INET, 1 for AF_INET6 */
158 static bool encap_head_updating = false;
159 #endif
160 
161 static bool encap_initialized = false;
162 /*
163  * Must be called before any other encap interface initialization.
164  */
165 void
166 encapinit(void)
167 {
168 
169 	if (encap_initialized)
170 		return;
171 
172 	encaptab.psz = pserialize_create();
173 	encaptab.elem_class = psref_class_create("encapelem", IPL_SOFTNET);
174 
175 	mutex_init(&encap_whole.lock, MUTEX_DEFAULT, IPL_NONE);
176 	cv_init(&encap_whole.cv, "ip_encap cv");
177 	encap_whole.busy = NULL;
178 
179 	encap_initialized = true;
180 }
181 
182 void
183 encap_init(void)
184 {
185 	static int initialized = 0;
186 
187 	if (initialized)
188 		return;
189 	initialized++;
190 #if 0
191 	/*
192 	 * we cannot use LIST_INIT() here, since drivers may want to call
193 	 * encap_attach() on driver attach.  encap_init() will be called
194 	 * on AF_INET{,6} initialization, which happens after driver
195 	 * initialization - using LIST_INIT() here can nuke encap_attach()
196 	 * from drivers.
197 	 */
198 	PSLIST_INIT(&encap_table);
199 #endif
200 
201 #ifdef USE_RADIX
202 	/*
203 	 * Initialize the radix lookup tables once the radix subsystem is initialized.
204 	 */
205 	rn_delayedinit((void *)&encap_head[0],
206 	    sizeof(struct sockaddr_pack) << 3);
207 #ifdef INET6
208 	rn_delayedinit((void *)&encap_head[1],
209 	    sizeof(struct sockaddr_pack) << 3);
210 #endif
211 #endif
212 }
213 
214 #ifdef INET
215 static struct encaptab *
216 encap4_lookup(struct mbuf *m, int off, int proto, enum direction dir,
217     struct psref *match_psref)
218 {
219 	struct ip *ip;
220 	struct ip_pack4 pack;
221 	struct encaptab *ep, *match;
222 	int prio, matchprio;
223 	int s;
224 #ifdef USE_RADIX
225 	struct radix_node_head *rnh = encap_rnh(AF_INET);
226 	struct radix_node *rn;
227 #endif
228 
229 	KASSERT(m->m_len >= sizeof(*ip));
230 
231 	ip = mtod(m, struct ip *);
232 
233 	memset(&pack, 0, sizeof(pack));
234 	pack.p.sp_len = sizeof(pack);
235 	pack.mine.sin_family = pack.yours.sin_family = AF_INET;
236 	pack.mine.sin_len = pack.yours.sin_len = sizeof(struct sockaddr_in);
237 	if (dir == INBOUND) {
238 		pack.mine.sin_addr = ip->ip_dst;
239 		pack.yours.sin_addr = ip->ip_src;
240 	} else {
241 		pack.mine.sin_addr = ip->ip_src;
242 		pack.yours.sin_addr = ip->ip_dst;
243 	}
244 
245 	match = NULL;
246 	matchprio = 0;
247 
248 	s = pserialize_read_enter();
249 #ifdef USE_RADIX
250 	if (encap_head_updating) {
251 		/*
252 		 * Update in progress. Do nothing.
253 		 */
254 		pserialize_read_exit(s);
255 		return NULL;
256 	}
257 
258 	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
259 	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
260 		struct encaptab *encapp = (struct encaptab *)rn;
261 
262 		psref_acquire(match_psref, &encapp->psref,
263 		    encaptab.elem_class);
264 		match = encapp;
265 		matchprio = mask_matchlen(match->srcmask) +
266 		    mask_matchlen(match->dstmask);
267 	}
268 #endif
269 	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
270 		struct psref elem_psref;
271 
272 		if (ep->af != AF_INET)
273 			continue;
274 		if (ep->proto >= 0 && ep->proto != proto)
275 			continue;
276 
277 		psref_acquire(&elem_psref, &ep->psref,
278 		    encaptab.elem_class);
279 		if (ep->func) {
280 			pserialize_read_exit(s);
281 			/* ep->func is sleepable. e.g. rtalloc1 */
282 			prio = (*ep->func)(m, off, proto, ep->arg);
283 			s = pserialize_read_enter();
284 		} else {
285 #ifdef USE_RADIX
286 			psref_release(&elem_psref, &ep->psref,
287 			    encaptab.elem_class);
288 			continue;
289 #else
290 			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
291 			    (struct sockaddr *)&pack.yours);
292 #endif
293 		}
294 
295 		/*
296 		 * We prioritize matches by the bit length of the match.
297 		 * mask_match() and any user-supplied matching function
298 		 * should return the bit length of the match (for example,
299 		 * if both src/dst are fully matched for IPv4, 64 is returned).
300 		 * A return value of 0 or less means "it did not match".
301 		 *
302 		 * The catch is that, since we have two "mask" portions, we
303 		 * cannot really define a total order between entries.
304 		 * For example, which of these should be preferred?
305 		 * mask_match() returns 48 (32 + 16) for both of them.
306 		 *	src=3ffe::/16, dst=3ffe:501::/32
307 		 *	src=3ffe:501::/32, dst=3ffe::/16
308 		 *
309 		 * We need to loop through all the possible candidates
310 		 * to get the best match - the search takes O(n) for
311 		 * n attachments (i.e. interfaces).
312 		 *
313 		 * For radix-based lookup, I guess source takes precedence.
314 		 * See rn_{refines,lexobetter} for the correct answer.
315 		 */
316 		if (prio <= 0) {
317 			psref_release(&elem_psref, &ep->psref,
318 			    encaptab.elem_class);
319 			continue;
320 		}
321 		if (prio > matchprio) {
322 			/* release last matched ep */
323 			if (match != NULL)
324 				psref_release(match_psref, &match->psref,
325 				    encaptab.elem_class);
326 
327 			psref_copy(match_psref, &elem_psref,
328 			    encaptab.elem_class);
329 			matchprio = prio;
330 			match = ep;
331 		}
332 		KASSERTMSG((match == NULL) || psref_held(&match->psref,
333 			encaptab.elem_class),
334 		    "current match = %p, but not hold its psref", match);
335 
336 		psref_release(&elem_psref, &ep->psref,
337 		    encaptab.elem_class);
338 	}
339 	pserialize_read_exit(s);
340 
341 	return match;
342 }
343 
344 void
345 encap4_input(struct mbuf *m, ...)
346 {
347 	int off, proto;
348 	va_list ap;
349 	const struct encapsw *esw;
350 	struct encaptab *match;
351 	struct psref match_psref;
352 
353 	va_start(ap, m);
354 	off = va_arg(ap, int);
355 	proto = va_arg(ap, int);
356 	va_end(ap);
357 
358 	match = encap4_lookup(m, off, proto, INBOUND, &match_psref);
359 	if (match) {
360 		/* found a match, "match" has the best one */
361 		esw = match->esw;
362 		if (esw && esw->encapsw4.pr_input) {
363 			(*esw->encapsw4.pr_input)(m, off, proto, match->arg);
364 			psref_release(&match_psref, &match->psref,
365 			    encaptab.elem_class);
366 		} else {
367 			psref_release(&match_psref, &match->psref,
368 			    encaptab.elem_class);
369 			m_freem(m);
370 		}
371 		return;
372 	}
373 
374 	/* last resort: inject to raw socket */
375 	SOFTNET_LOCK_IF_NET_MPSAFE();
376 	rip_input(m, off, proto);
377 	SOFTNET_UNLOCK_IF_NET_MPSAFE();
378 }
379 #endif
380 
381 #ifdef INET6
382 static struct encaptab *
383 encap6_lookup(struct mbuf *m, int off, int proto, enum direction dir,
384     struct psref *match_psref)
385 {
386 	struct ip6_hdr *ip6;
387 	struct ip_pack6 pack;
388 	int prio, matchprio;
389 	int s;
390 	struct encaptab *ep, *match;
391 #ifdef USE_RADIX
392 	struct radix_node_head *rnh = encap_rnh(AF_INET6);
393 	struct radix_node *rn;
394 #endif
395 
396 	KASSERT(m->m_len >= sizeof(*ip6));
397 
398 	ip6 = mtod(m, struct ip6_hdr *);
399 
400 	memset(&pack, 0, sizeof(pack));
401 	pack.p.sp_len = sizeof(pack);
402 	pack.mine.sin6_family = pack.yours.sin6_family = AF_INET6;
403 	pack.mine.sin6_len = pack.yours.sin6_len = sizeof(struct sockaddr_in6);
404 	if (dir == INBOUND) {
405 		pack.mine.sin6_addr = ip6->ip6_dst;
406 		pack.yours.sin6_addr = ip6->ip6_src;
407 	} else {
408 		pack.mine.sin6_addr = ip6->ip6_src;
409 		pack.yours.sin6_addr = ip6->ip6_dst;
410 	}
411 
412 	match = NULL;
413 	matchprio = 0;
414 
415 	s = pserialize_read_enter();
416 #ifdef USE_RADIX
417 	if (encap_head_updating) {
418 		/*
419 		 * Update in progress. Do nothing.
420 		 */
421 		pserialize_read_exit(s);
422 		return NULL;
423 	}
424 
425 	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
426 	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
427 		struct encaptab *encapp = (struct encaptab *)rn;
428 
429 		psref_acquire(match_psref, &encapp->psref,
430 		    encaptab.elem_class);
431 		match = encapp;
432 		matchprio = mask_matchlen(match->srcmask) +
433 		    mask_matchlen(match->dstmask);
434 	}
435 #endif
436 	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
437 		struct psref elem_psref;
438 
439 		if (ep->af != AF_INET6)
440 			continue;
441 		if (ep->proto >= 0 && ep->proto != proto)
442 			continue;
443 
444 		psref_acquire(&elem_psref, &ep->psref,
445 		    encaptab.elem_class);
446 
447 		if (ep->func) {
448 			pserialize_read_exit(s);
449 			/* ep->func is sleepable. e.g. rtalloc1 */
450 			prio = (*ep->func)(m, off, proto, ep->arg);
451 			s = pserialize_read_enter();
452 		} else {
453 #ifdef USE_RADIX
454 			psref_release(&elem_psref, &ep->psref,
455 			    encaptab.elem_class);
456 			continue;
457 #else
458 			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
459 			    (struct sockaddr *)&pack.yours);
460 #endif
461 		}
462 
463 		/* see encap4_lookup() for issues here */
464 		if (prio <= 0) {
465 			psref_release(&elem_psref, &ep->psref,
466 			    encaptab.elem_class);
467 			continue;
468 		}
469 		if (prio > matchprio) {
470 			/* release last matched ep */
471 			if (match != NULL)
472 				psref_release(match_psref, &match->psref,
473 				    encaptab.elem_class);
474 
475 			psref_copy(match_psref, &elem_psref,
476 			    encaptab.elem_class);
477 			matchprio = prio;
478 			match = ep;
479 		}
480 		KASSERTMSG((match == NULL) || psref_held(&match->psref,
481 			encaptab.elem_class),
482 		    "current match = %p, but not hold its psref", match);
483 
484 		psref_release(&elem_psref, &ep->psref,
485 		    encaptab.elem_class);
486 	}
487 	pserialize_read_exit(s);
488 
489 	return match;
490 }
491 
492 int
493 encap6_input(struct mbuf **mp, int *offp, int proto)
494 {
495 	struct mbuf *m = *mp;
496 	const struct encapsw *esw;
497 	struct encaptab *match;
498 	struct psref match_psref;
499 	int rv;
500 
501 	match = encap6_lookup(m, *offp, proto, INBOUND, &match_psref);
502 
503 	if (match) {
504 		/* found a match */
505 		esw = match->esw;
506 		if (esw && esw->encapsw6.pr_input) {
507 			int ret;
508 			ret = (*esw->encapsw6.pr_input)(mp, offp, proto,
509 			    match->arg);
510 			psref_release(&match_psref, &match->psref,
511 			    encaptab.elem_class);
512 			return ret;
513 		} else {
514 			psref_release(&match_psref, &match->psref,
515 			    encaptab.elem_class);
516 			m_freem(m);
517 			return IPPROTO_DONE;
518 		}
519 	}
520 
521 	/* last resort: inject to raw socket */
522 	SOFTNET_LOCK_IF_NET_MPSAFE();
523 	rv = rip6_input(mp, offp, proto);
524 	SOFTNET_UNLOCK_IF_NET_MPSAFE();
525 	return rv;
526 }
527 #endif
528 
529 /*
530  * XXX
531  * The encaptab list and the rnh radix tree must be manipulated atomically.
532  */
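/*
 * Writer-side protocol for the radix tree (a sketch of what encap_add()
 * and encap_remove() below actually do):
 *
 *	encap_head_updating = true;		readers now bail out
 *	pserialize_perform(encaptab.psz);	wait for current readers
 *	rnh_addaddr()/rnh_deladdr();		modify the radix tree
 *	encap_head_updating = false;		radix lookups resume
 *
 * A reader that observes encap_head_updating simply gives up the lookup
 * for that packet instead of touching a tree in mid-update.
 */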
533 static int
534 encap_add(struct encaptab *ep)
535 {
536 #ifdef USE_RADIX
537 	struct radix_node_head *rnh = encap_rnh(ep->af);
538 #endif
539 
540 	KASSERT(encap_lock_held());
541 
542 #ifdef USE_RADIX
543 	if (!ep->func && rnh) {
544 		/* Disable access to the radix tree for reader. */
545 		encap_head_updating = true;
546 		/* Wait for all readers to drain. */
547 		pserialize_perform(encaptab.psz);
548 
549 		if (!rnh->rnh_addaddr((void *)ep->addrpack,
550 		    (void *)ep->maskpack, rnh, ep->nodes)) {
551 			encap_head_updating = false;
552 			return EEXIST;
553 		}
554 
555 		/*
556 		 * The ep added to the radix tree must be skipped while
557 		 * encap[46]_lookup walks the encaptab list.  In other words,
558 		 * encap_add() does not need to care whether the ep has
559 		 * been added to the encaptab list yet or not.
560 		 * So, we can re-enable access to the radix tree for now.
561 		 */
562 		encap_head_updating = false;
563 	}
564 #endif
565 	PSLIST_WRITER_INSERT_HEAD(&encap_table, ep, chain);
566 
567 	return 0;
568 }
569 
570 /*
571  * XXX
572  * The encaptab list and the rnh radix tree must be manipulated atomically.
573  */
574 static int
575 encap_remove(struct encaptab *ep)
576 {
577 #ifdef USE_RADIX
578 	struct radix_node_head *rnh = encap_rnh(ep->af);
579 #endif
580 	int error = 0;
581 
582 	KASSERT(encap_lock_held());
583 
584 #ifdef USE_RADIX
585 	if (!ep->func && rnh) {
586 		/* Disable access to the radix tree for reader. */
587 		encap_head_updating = true;
588 		/* Wait for all readers to drain. */
589 		pserialize_perform(encaptab.psz);
590 
591 		if (!rnh->rnh_deladdr((void *)ep->addrpack,
592 		    (void *)ep->maskpack, rnh))
593 			error = ESRCH;
594 
595 		/*
596 		 * The ep deleted from the radix tree is still skipped while
597 		 * encap[46]_lookup walks the encaptab list.  In other words,
598 		 * encap_remove() does not need to care whether the ep has
599 		 * been removed from the encaptab list yet or not.
600 		 * So, we can re-enable access to the radix tree for now.
601 		 */
602 		encap_head_updating = false;
603 	}
604 #endif
605 	PSLIST_WRITER_REMOVE(ep, chain);
606 
607 	return error;
608 }
609 
610 static int
611 encap_afcheck(int af, const struct sockaddr *sp, const struct sockaddr *dp)
612 {
613 	if (sp && dp) {
614 		if (sp->sa_len != dp->sa_len)
615 			return EINVAL;
616 		if (af != sp->sa_family || af != dp->sa_family)
617 			return EINVAL;
618 	} else if (!sp && !dp)
619 		;
620 	else
621 		return EINVAL;
622 
623 	switch (af) {
624 	case AF_INET:
625 		if (sp && sp->sa_len != sizeof(struct sockaddr_in))
626 			return EINVAL;
627 		if (dp && dp->sa_len != sizeof(struct sockaddr_in))
628 			return EINVAL;
629 		break;
630 #ifdef INET6
631 	case AF_INET6:
632 		if (sp && sp->sa_len != sizeof(struct sockaddr_in6))
633 			return EINVAL;
634 		if (dp && dp->sa_len != sizeof(struct sockaddr_in6))
635 			return EINVAL;
636 		break;
637 #endif
638 	default:
639 		return EAFNOSUPPORT;
640 	}
641 
642 	return 0;
643 }
644 
645 /*
646  * sp (src ptr) is always my side, and dp (dst ptr) is always the remote side.
647  * The length of the masks (sm and dm) is assumed to be the same as sp/dp.
648  * The return value is needed later as the input (cookie) for encap_detach().
649  */
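/*
 * Illustrative usage sketch (hypothetical names, error handling omitted;
 * the real callers are tunnel drivers):
 *
 *	struct sockaddr_in src, srcmask, dst, dstmask;
 *	const struct encaptab *cookie;
 *
 *	... fill in src/dst with the tunnel endpoints, and the masks
 *	    with all-ones (host) masks for a point-to-point tunnel ...
 *
 *	encap_lock_enter();
 *	cookie = encap_attach(AF_INET, IPPROTO_IPV6,
 *	    (struct sockaddr *)&src, (struct sockaddr *)&srcmask,
 *	    (struct sockaddr *)&dst, (struct sockaddr *)&dstmask,
 *	    &mydriver_encapsw, mydriver_softc);
 *	encap_lock_exit();
 *
 *	...
 *
 *	encap_lock_enter();
 *	encap_detach(cookie);
 *	encap_lock_exit();
 *
 * mydriver_encapsw and mydriver_softc are placeholders for the caller's
 * encapsw table and private argument.
 */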
650 const struct encaptab *
651 encap_attach(int af, int proto,
652     const struct sockaddr *sp, const struct sockaddr *sm,
653     const struct sockaddr *dp, const struct sockaddr *dm,
654     const struct encapsw *esw, void *arg)
655 {
656 	struct encaptab *ep;
657 	int error;
658 	int pss;
659 	size_t l;
660 	struct ip_pack4 *pack4;
661 #ifdef INET6
662 	struct ip_pack6 *pack6;
663 #endif
664 #ifndef ENCAP_MPSAFE
665 	int s;
666 
667 	s = splsoftnet();
668 #endif
669 	/* sanity check on args */
670 	error = encap_afcheck(af, sp, dp);
671 	if (error)
672 		goto fail;
673 
674 	/* check if anyone has already attached with exactly the same config */
675 	pss = pserialize_read_enter();
676 	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
677 		if (ep->af != af)
678 			continue;
679 		if (ep->proto != proto)
680 			continue;
681 		if (ep->func)
682 			continue;
683 
684 		KASSERT(ep->src != NULL);
685 		KASSERT(ep->dst != NULL);
686 		KASSERT(ep->srcmask != NULL);
687 		KASSERT(ep->dstmask != NULL);
688 
689 		if (ep->src->sa_len != sp->sa_len ||
690 		    memcmp(ep->src, sp, sp->sa_len) != 0 ||
691 		    memcmp(ep->srcmask, sm, sp->sa_len) != 0)
692 			continue;
693 		if (ep->dst->sa_len != dp->sa_len ||
694 		    memcmp(ep->dst, dp, dp->sa_len) != 0 ||
695 		    memcmp(ep->dstmask, dm, dp->sa_len) != 0)
696 			continue;
697 
698 		error = EEXIST;
699 		pserialize_read_exit(pss);
700 		goto fail;
701 	}
702 	pserialize_read_exit(pss);
703 
704 	switch (af) {
705 	case AF_INET:
706 		l = sizeof(*pack4);
707 		break;
708 #ifdef INET6
709 	case AF_INET6:
710 		l = sizeof(*pack6);
711 		break;
712 #endif
713 	default:
714 		goto fail;
715 	}
716 
717 	/* M_NETADDR ok? */
718 	ep = kmem_zalloc(sizeof(*ep), KM_NOSLEEP);
719 	if (ep == NULL) {
720 		error = ENOBUFS;
721 		goto fail;
722 	}
723 	ep->addrpack = kmem_zalloc(l, KM_NOSLEEP);
724 	if (ep->addrpack == NULL) {
725 		error = ENOBUFS;
726 		goto gc;
727 	}
728 	ep->maskpack = kmem_zalloc(l, KM_NOSLEEP);
729 	if (ep->maskpack == NULL) {
730 		error = ENOBUFS;
731 		goto gc;
732 	}
733 
734 	ep->af = af;
735 	ep->proto = proto;
736 	ep->addrpack->sa_len = l & 0xff;
737 	ep->maskpack->sa_len = l & 0xff;
738 	switch (af) {
739 	case AF_INET:
740 		pack4 = (struct ip_pack4 *)ep->addrpack;
741 		ep->src = (struct sockaddr *)&pack4->mine;
742 		ep->dst = (struct sockaddr *)&pack4->yours;
743 		pack4 = (struct ip_pack4 *)ep->maskpack;
744 		ep->srcmask = (struct sockaddr *)&pack4->mine;
745 		ep->dstmask = (struct sockaddr *)&pack4->yours;
746 		break;
747 #ifdef INET6
748 	case AF_INET6:
749 		pack6 = (struct ip_pack6 *)ep->addrpack;
750 		ep->src = (struct sockaddr *)&pack6->mine;
751 		ep->dst = (struct sockaddr *)&pack6->yours;
752 		pack6 = (struct ip_pack6 *)ep->maskpack;
753 		ep->srcmask = (struct sockaddr *)&pack6->mine;
754 		ep->dstmask = (struct sockaddr *)&pack6->yours;
755 		break;
756 #endif
757 	}
758 
759 	memcpy(ep->src, sp, sp->sa_len);
760 	memcpy(ep->srcmask, sm, sp->sa_len);
761 	memcpy(ep->dst, dp, dp->sa_len);
762 	memcpy(ep->dstmask, dm, dp->sa_len);
763 	ep->esw = esw;
764 	ep->arg = arg;
765 	psref_target_init(&ep->psref, encaptab.elem_class);
766 
767 	error = encap_add(ep);
768 	if (error)
769 		goto gc;
770 
771 	error = 0;
772 #ifndef ENCAP_MPSAFE
773 	splx(s);
774 #endif
775 	return ep;
776 
777 gc:
778 	if (ep->addrpack)
779 		kmem_free(ep->addrpack, l);
780 	if (ep->maskpack)
781 		kmem_free(ep->maskpack, l);
782 	if (ep)
783 		kmem_free(ep, sizeof(*ep));
784 fail:
785 #ifndef ENCAP_MPSAFE
786 	splx(s);
787 #endif
788 	return NULL;
789 }
790 
791 const struct encaptab *
792 encap_attach_func(int af, int proto,
793     int (*func)(struct mbuf *, int, int, void *),
794     const struct encapsw *esw, void *arg)
795 {
796 	struct encaptab *ep;
797 	int error;
798 #ifndef ENCAP_MPSAFE
799 	int s;
800 
801 	s = splsoftnet();
802 #endif
803 	/* sanity check on args */
804 	if (!func) {
805 		error = EINVAL;
806 		goto fail;
807 	}
808 
809 	error = encap_afcheck(af, NULL, NULL);
810 	if (error)
811 		goto fail;
812 
813 	ep = kmem_alloc(sizeof(*ep), KM_NOSLEEP);	/*XXX*/
814 	if (ep == NULL) {
815 		error = ENOBUFS;
816 		goto fail;
817 	}
818 	memset(ep, 0, sizeof(*ep));
819 
820 	ep->af = af;
821 	ep->proto = proto;
822 	ep->func = func;
823 	ep->esw = esw;
824 	ep->arg = arg;
825 	psref_target_init(&ep->psref, encaptab.elem_class);
826 
827 	error = encap_add(ep);
828 	if (error)
829 		goto gc;
830 
831 	error = 0;
832 #ifndef ENCAP_MPSAFE
833 	splx(s);
834 #endif
835 	return ep;
836 
837 gc:
838 	kmem_free(ep, sizeof(*ep));
839 fail:
840 #ifndef ENCAP_MPSAFE
841 	splx(s);
842 #endif
843 	return NULL;
844 }
845 
846 /* XXX encap4_ctlinput() is necessary if we set DF=1 on outer IPv4 header */
847 
848 #ifdef INET6
849 void *
850 encap6_ctlinput(int cmd, const struct sockaddr *sa, void *d0)
851 {
852 	void *d = d0;
853 	struct ip6_hdr *ip6;
854 	struct mbuf *m;
855 	int off;
856 	struct ip6ctlparam *ip6cp = NULL;
857 	int nxt;
858 	int s;
859 	struct encaptab *ep;
860 	const struct encapsw *esw;
861 
862 	if (sa->sa_family != AF_INET6 ||
863 	    sa->sa_len != sizeof(struct sockaddr_in6))
864 		return NULL;
865 
866 	if ((unsigned)cmd >= PRC_NCMDS)
867 		return NULL;
868 	if (cmd == PRC_HOSTDEAD)
869 		d = NULL;
870 	else if (cmd == PRC_MSGSIZE)
871 		; /* special code is present, see below */
872 	else if (inet6ctlerrmap[cmd] == 0)
873 		return NULL;
874 
875 	/* if the parameter is from icmp6, decode it. */
876 	if (d != NULL) {
877 		ip6cp = (struct ip6ctlparam *)d;
878 		m = ip6cp->ip6c_m;
879 		ip6 = ip6cp->ip6c_ip6;
880 		off = ip6cp->ip6c_off;
881 		nxt = ip6cp->ip6c_nxt;
882 
883 		if (ip6 && cmd == PRC_MSGSIZE) {
884 			int valid = 0;
885 			struct encaptab *match;
886 			struct psref elem_psref;
887 
888 			/*
889 			 * Check to see if we have a valid encap configuration.
890 			 */
891 			match = encap6_lookup(m, off, nxt, OUTBOUND,
892 			    &elem_psref);
893 			if (match) {
894 				valid++;
895 				psref_release(&elem_psref, &match->psref,
896 				    encaptab.elem_class);
897 			}
898 			/*
899 			 * Depending on the value of "valid" and the routing
900 			 * table size (mtudisc_{hi,lo}wat), we will:
901 			 * - recalculate the new MTU and create the
902 			 *   corresponding routing entry, or
903 			 * - ignore the MTU change notification.
904 			 */
905 			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
906 		}
907 	} else {
908 		m = NULL;
909 		ip6 = NULL;
910 		nxt = -1;
911 	}
912 
913 	/* inform all listeners */
914 
915 	s = pserialize_read_enter();
916 	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
917 		struct psref elem_psref;
918 
919 		if (ep->af != AF_INET6)
920 			continue;
921 		if (ep->proto >= 0 && ep->proto != nxt)
922 			continue;
923 
924 		/* should optimize by looking at address pairs */
925 
926 		/* XXX need to pass ep->arg or ep itself to listeners */
927 		psref_acquire(&elem_psref, &ep->psref,
928 		    encaptab.elem_class);
929 		esw = ep->esw;
930 		if (esw && esw->encapsw6.pr_ctlinput) {
931 			pserialize_read_exit(s);
932 			/* pr_ctlinput is sleepable. e.g. rtcache_free */
933 			(*esw->encapsw6.pr_ctlinput)(cmd, sa, d, ep->arg);
934 			s = pserialize_read_enter();
935 		}
936 		psref_release(&elem_psref, &ep->psref,
937 		    encaptab.elem_class);
938 	}
939 	pserialize_read_exit(s);
940 
941 	rip6_ctlinput(cmd, sa, d0);
942 	return NULL;
943 }
944 #endif
945 
946 int
947 encap_detach(const struct encaptab *cookie)
948 {
949 	const struct encaptab *ep = cookie;
950 	struct encaptab *p;
951 	int error;
952 
953 	KASSERT(encap_lock_held());
954 
955 	PSLIST_WRITER_FOREACH(p, &encap_table, struct encaptab, chain) {
956 		if (p == ep) {
957 			error = encap_remove(p);
958 			if (error)
959 				return error;
960 			else
961 				break;
962 		}
963 	}
964 	if (p == NULL)
965 		return ENOENT;
966 
967 	pserialize_perform(encaptab.psz);
968 	psref_target_destroy(&p->psref,
969 	    encaptab.elem_class);
970 	if (!ep->func) {
971 		kmem_free(p->addrpack, ep->addrpack->sa_len);
972 		kmem_free(p->maskpack, ep->maskpack->sa_len);
973 	}
974 	kmem_free(p, sizeof(*p));
975 
976 	return 0;
977 }
978 
979 #ifdef USE_RADIX
980 static struct radix_node_head *
981 encap_rnh(int af)
982 {
983 
984 	switch (af) {
985 	case AF_INET:
986 		return encap_head[0];
987 #ifdef INET6
988 	case AF_INET6:
989 		return encap_head[1];
990 #endif
991 	default:
992 		return NULL;
993 	}
994 }
995 
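/*
 * Estimate the prefix length of a mask by counting 8 bits for every
 * non-zero byte after sa_len/sa_family.  For example, a sockaddr_in
 * mask of 255.255.255.0 yields 24, while 255.255.255.128 yields 32,
 * because a partially-set byte is counted as a full 8 bits (hence the
 * "estimate" note below).
 */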
996 static int
997 mask_matchlen(const struct sockaddr *sa)
998 {
999 	const char *p, *ep;
1000 	int l;
1001 
1002 	p = (const char *)sa;
1003 	ep = p + sa->sa_len;
1004 	p += 2;	/* sa_len + sa_family */
1005 
1006 	l = 0;
1007 	while (p < ep) {
1008 		l += (*p ? 8 : 0);	/* estimate */
1009 		p++;
1010 	}
1011 	return l;
1012 }
1013 #endif
1014 
1015 #ifndef USE_RADIX
1016 static int
1017 mask_match(const struct encaptab *ep,
1018 	   const struct sockaddr *sp,
1019 	   const struct sockaddr *dp)
1020 {
1021 	struct sockaddr_storage s;
1022 	struct sockaddr_storage d;
1023 	int i;
1024 	const u_int8_t *p, *q;
1025 	u_int8_t *r;
1026 	int matchlen;
1027 
1028 	KASSERTMSG(ep->func == NULL, "wrong encaptab passed to mask_match");
1029 
1030 	if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d))
1031 		return 0;
1032 	if (sp->sa_family != ep->af || dp->sa_family != ep->af)
1033 		return 0;
1034 	if (sp->sa_len != ep->src->sa_len || dp->sa_len != ep->dst->sa_len)
1035 		return 0;
1036 
1037 	matchlen = 0;
1038 
1039 	p = (const u_int8_t *)sp;
1040 	q = (const u_int8_t *)ep->srcmask;
1041 	r = (u_int8_t *)&s;
1042 	for (i = 0 ; i < sp->sa_len; i++) {
1043 		r[i] = p[i] & q[i];
1044 		/* XXX estimate */
1045 		matchlen += (q[i] ? 8 : 0);
1046 	}
1047 
1048 	p = (const u_int8_t *)dp;
1049 	q = (const u_int8_t *)ep->dstmask;
1050 	r = (u_int8_t *)&d;
1051 	for (i = 0 ; i < dp->sa_len; i++) {
1052 		r[i] = p[i] & q[i];
1053 		/* XXX rough estimate */
1054 		matchlen += (q[i] ? 8 : 0);
1055 	}
1056 
1057 	/* need to overwrite len/family portion as we don't compare them */
1058 	s.ss_len = sp->sa_len;
1059 	s.ss_family = sp->sa_family;
1060 	d.ss_len = dp->sa_len;
1061 	d.ss_family = dp->sa_family;
1062 
1063 	if (memcmp(&s, ep->src, ep->src->sa_len) == 0 &&
1064 	    memcmp(&d, ep->dst, ep->dst->sa_len) == 0) {
1065 		return matchlen;
1066 	} else
1067 		return 0;
1068 }
1069 #endif
1070 
1071 int
1072 encap_lock_enter(void)
1073 {
1074 	int error;
1075 
1076 	mutex_enter(&encap_whole.lock);
1077 	while (encap_whole.busy != NULL) {
1078 		error = cv_wait_sig(&encap_whole.cv, &encap_whole.lock);
1079 		if (error) {
1080 			mutex_exit(&encap_whole.lock);
1081 			return error;
1082 		}
1083 	}
1084 	KASSERT(encap_whole.busy == NULL);
1085 	encap_whole.busy = curlwp;
1086 	mutex_exit(&encap_whole.lock);
1087 
1088 	return 0;
1089 }
1090 
1091 void
1092 encap_lock_exit(void)
1093 {
1094 
1095 	mutex_enter(&encap_whole.lock);
1096 	KASSERT(encap_whole.busy == curlwp);
1097 	encap_whole.busy = NULL;
1098 	cv_broadcast(&encap_whole.cv);
1099 	mutex_exit(&encap_whole.lock);
1100 }
1101 
1102 bool
1103 encap_lock_held(void)
1104 {
1105 
1106 	return (encap_whole.busy == curlwp);
1107 }
1108