xref: /netbsd-src/sys/net/bpf_stub.c (revision c6a4b11da81149e301ef0fa5adb0f10dbcf2c6c3)
1*c6a4b11dSmsaitoh /*	$NetBSD: bpf_stub.c,v 1.8 2018/06/25 03:22:14 msaitoh Exp $	*/
221958f98Spooka 
321958f98Spooka /*
421958f98Spooka  * Copyright (c) 2010 The NetBSD Foundation, Inc.
521958f98Spooka  * All rights reserved.
621958f98Spooka  *
721958f98Spooka  * Redistribution and use in source and binary forms, with or without
821958f98Spooka  * modification, are permitted provided that the following conditions
921958f98Spooka  * are met:
1021958f98Spooka  * 1. Redistributions of source code must retain the above copyright
1121958f98Spooka  *    notice, this list of conditions and the following disclaimer.
1221958f98Spooka  * 2. Redistributions in binary form must reproduce the above copyright
1321958f98Spooka  *    notice, this list of conditions and the following disclaimer in the
1421958f98Spooka  *    documentation and/or other materials provided with the distribution.
1521958f98Spooka  *
1621958f98Spooka  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
1721958f98Spooka  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
1821958f98Spooka  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1921958f98Spooka  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
2021958f98Spooka  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2121958f98Spooka  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2221958f98Spooka  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2321958f98Spooka  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2421958f98Spooka  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2521958f98Spooka  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2621958f98Spooka  * POSSIBILITY OF SUCH DAMAGE.
2721958f98Spooka  */
2821958f98Spooka 
2910fe49d7Spooka #include <sys/cdefs.h>
30*c6a4b11dSmsaitoh __KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.8 2018/06/25 03:22:14 msaitoh Exp $");
3110fe49d7Spooka 
3210fe49d7Spooka #include <sys/param.h>
33b2bb0f38Spooka #include <sys/kmem.h>
3410fe49d7Spooka #include <sys/mbuf.h>
3510fe49d7Spooka 
3610fe49d7Spooka #include <net/bpf.h>
3710fe49d7Spooka 
38b2bb0f38Spooka struct laglist {
39b2bb0f38Spooka 	struct ifnet *lag_ifp;
40b2bb0f38Spooka 	u_int lag_dlt;
41b2bb0f38Spooka 	u_int lag_hlen;
42b2bb0f38Spooka 	struct bpf_if **lag_drvp;
4310fe49d7Spooka 
44b2bb0f38Spooka 	TAILQ_ENTRY(laglist) lag_entries;
45b2bb0f38Spooka };
46b2bb0f38Spooka 
47b2bb0f38Spooka static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);
48b2bb0f38Spooka 
49b2bb0f38Spooka static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
50b2bb0f38Spooka static void bpf_stub_detach(struct ifnet *);
51b2bb0f38Spooka 
52b2bb0f38Spooka static void bpf_stub_null(void);
53b2bb0f38Spooka static void bpf_stub_warn(void);
54b2bb0f38Spooka 
55b2bb0f38Spooka static kmutex_t handovermtx;
56b2bb0f38Spooka static kcondvar_t handovercv;
57b2bb0f38Spooka static bool handover;
58b2bb0f38Spooka 
59b2bb0f38Spooka struct bpf_ops bpf_ops_stub = {
60b2bb0f38Spooka 	.bpf_attach =		bpf_stub_attach,
61b2bb0f38Spooka 	.bpf_detach =		bpf_stub_detach,
62b2bb0f38Spooka 	.bpf_change_type =	(void *)bpf_stub_null,
63b2bb0f38Spooka 
64b2bb0f38Spooka 	.bpf_mtap = 		(void *)bpf_stub_warn,
65b2bb0f38Spooka 	.bpf_mtap2 = 		(void *)bpf_stub_warn,
66b2bb0f38Spooka 	.bpf_mtap_af = 		(void *)bpf_stub_warn,
67b2bb0f38Spooka 	.bpf_mtap_sl_in = 	(void *)bpf_stub_warn,
68b2bb0f38Spooka 	.bpf_mtap_sl_out =	(void *)bpf_stub_warn,
6987e988a7Sozaki-r 
7087e988a7Sozaki-r 	.bpf_mtap_softint_init =	(void *)bpf_stub_null,
7187e988a7Sozaki-r 	.bpf_mtap_softint =		(void *)bpf_stub_warn,
72b2bb0f38Spooka };
73b2bb0f38Spooka struct bpf_ops *bpf_ops;
74b2bb0f38Spooka 
75b2bb0f38Spooka static void
bpf_stub_attach(struct ifnet * ifp,u_int dlt,u_int hlen,struct bpf_if ** drvp)76b2bb0f38Spooka bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
77b2bb0f38Spooka {
78b2bb0f38Spooka 	struct laglist *lag;
79b2bb0f38Spooka 	bool storeattach = true;
80b2bb0f38Spooka 
81b2bb0f38Spooka 	lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
82b2bb0f38Spooka 	lag->lag_ifp = ifp;
83b2bb0f38Spooka 	lag->lag_dlt = dlt;
84b2bb0f38Spooka 	lag->lag_hlen = hlen;
85b2bb0f38Spooka 	lag->lag_drvp = drvp;
86b2bb0f38Spooka 
87b2bb0f38Spooka 	mutex_enter(&handovermtx);
88b2bb0f38Spooka 	/*
89b2bb0f38Spooka 	 * If handover is in progress, wait for it to finish and complete
90b2bb0f38Spooka 	 * attach after that.  Otherwise record ourselves.
91b2bb0f38Spooka 	 */
92b2bb0f38Spooka 	while (handover) {
93b2bb0f38Spooka 		storeattach = false;
94b2bb0f38Spooka 		cv_wait(&handovercv, &handovermtx);
95b2bb0f38Spooka 	}
96b2bb0f38Spooka 
97b2bb0f38Spooka 	if (storeattach == false) {
98b2bb0f38Spooka 		mutex_exit(&handovermtx);
99b2bb0f38Spooka 		kmem_free(lag, sizeof(*lag));
100b2bb0f38Spooka 		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
101b2bb0f38Spooka 		bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
102b2bb0f38Spooka 	} else {
10310fe49d7Spooka 		*drvp = NULL;
104b2bb0f38Spooka 		TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
105b2bb0f38Spooka 		mutex_exit(&handovermtx);
106b2bb0f38Spooka 	}
107b2bb0f38Spooka }
108b2bb0f38Spooka 
109b2bb0f38Spooka static void
bpf_stub_detach(struct ifnet * ifp)110b2bb0f38Spooka bpf_stub_detach(struct ifnet *ifp)
111b2bb0f38Spooka {
112b2bb0f38Spooka 	TAILQ_HEAD(, laglist) rmlist;
113b2bb0f38Spooka 	struct laglist *lag, *lag_next;
114b2bb0f38Spooka 	bool didhand;
115b2bb0f38Spooka 
116b2bb0f38Spooka 	TAILQ_INIT(&rmlist);
117b2bb0f38Spooka 
118b2bb0f38Spooka 	didhand = false;
119b2bb0f38Spooka 	mutex_enter(&handovermtx);
120b2bb0f38Spooka 	while (handover) {
121b2bb0f38Spooka 		didhand = true;
122b2bb0f38Spooka 		cv_wait(&handovercv, &handovermtx);
123b2bb0f38Spooka 	}
124b2bb0f38Spooka 
125b2bb0f38Spooka 	if (didhand == false) {
126b2bb0f38Spooka 		/* atomically remove all */
127b2bb0f38Spooka 		for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
128b2bb0f38Spooka 			lag_next = TAILQ_NEXT(lag, lag_entries);
129b2bb0f38Spooka 			if (lag->lag_ifp == ifp) {
130b2bb0f38Spooka 				TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
131b2bb0f38Spooka 				TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
132b2bb0f38Spooka 			}
133b2bb0f38Spooka 		}
134b2bb0f38Spooka 		mutex_exit(&handovermtx);
135b2bb0f38Spooka 		while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
136b2bb0f38Spooka 			TAILQ_REMOVE(&rmlist, lag, lag_entries);
137b2bb0f38Spooka 			kmem_free(lag, sizeof(*lag));
138b2bb0f38Spooka 		}
139b2bb0f38Spooka 	} else {
140b2bb0f38Spooka 		mutex_exit(&handovermtx);
141b2bb0f38Spooka 		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
142b2bb0f38Spooka 		bpf_ops->bpf_detach(ifp);
143b2bb0f38Spooka 	}
14410fe49d7Spooka }
14510fe49d7Spooka 
/* Harmless no-op stand-in for ops that may fire before bpf loads. */
static void
bpf_stub_null(void)
{

}
15110fe49d7Spooka 
/*
 * Stand-in for tap ops that should never run without an attached
 * bpf_if: panic on DEBUG kernels, complain on DIAGNOSTIC ones.
 */
static void
bpf_stub_warn(void)
{

#ifdef DEBUG
	panic("bpf method called without attached bpf_if");
#endif
#ifdef DIAGNOSTIC
	printf("bpf method called without attached bpf_if\n");
#endif
}
16310fe49d7Spooka 
16410fe49d7Spooka void
bpf_setops(void)1654b50cb78Smatt bpf_setops(void)
16610fe49d7Spooka {
16710fe49d7Spooka 
168b2bb0f38Spooka 	mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
169b2bb0f38Spooka 	cv_init(&handovercv, "bpfops");
17010fe49d7Spooka 	bpf_ops = &bpf_ops_stub;
17110fe49d7Spooka }
172b2bb0f38Spooka 
/*
 * Party's over, prepare for handover.
 * It needs to happen *before* bpf_ops is set to make it atomic
 * to callers (see also stub implementations, which wait if
 * called during handover).  The likelihood of seeing a full
 * attach-detach *during* handover comes close to astronomical,
 * but handle it anyway since it's relatively easy.
 */
181b2bb0f38Spooka void
bpf_ops_handover_enter(struct bpf_ops * newops)182b2bb0f38Spooka bpf_ops_handover_enter(struct bpf_ops *newops)
183b2bb0f38Spooka {
184b2bb0f38Spooka 	struct laglist *lag;
185b2bb0f38Spooka 
186b2bb0f38Spooka 	mutex_enter(&handovermtx);
187b2bb0f38Spooka 	handover = true;
188b2bb0f38Spooka 
189b2bb0f38Spooka 	while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
190b2bb0f38Spooka 		TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
191b2bb0f38Spooka 		mutex_exit(&handovermtx);
192b2bb0f38Spooka 		newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
193b2bb0f38Spooka 		    lag->lag_hlen, lag->lag_drvp);
194b2bb0f38Spooka 		kmem_free(lag, sizeof(*lag));
195b2bb0f38Spooka 		mutex_enter(&handovermtx);
196b2bb0f38Spooka 	}
197b2bb0f38Spooka 	mutex_exit(&handovermtx);
198b2bb0f38Spooka }
199b2bb0f38Spooka 
/* handover done */
201b2bb0f38Spooka void
bpf_ops_handover_exit(void)2024b50cb78Smatt bpf_ops_handover_exit(void)
203b2bb0f38Spooka {
204b2bb0f38Spooka 
205b2bb0f38Spooka 	mutex_enter(&handovermtx);
206b2bb0f38Spooka 	handover = false;
207b2bb0f38Spooka 	cv_broadcast(&handovercv);
208b2bb0f38Spooka 	mutex_exit(&handovermtx);
209b2bb0f38Spooka }
210