/*	$NetBSD: bpf_stub.c,v 1.5 2010/04/05 07:22:22 joerg Exp $	*/

/*
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.5 2010/04/05 07:22:22 joerg Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <net/bpf.h>
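/*
 * Interfaces that attach while only the stub ops are installed (i.e.
 * before the real bpf driver is loaded) are recorded on the list below
 * as "lagging" attach requests and replayed against the real ops in
 * bpf_ops_handover_enter().
 */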
struct laglist {
	struct ifnet *lag_ifp;
	u_int lag_dlt;
	u_int lag_hlen;
	struct bpf_if **lag_drvp;

	TAILQ_ENTRY(laglist) lag_entries;
};

static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);

static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
static void bpf_stub_detach(struct ifnet *);

static void bpf_stub_null(void);
static void bpf_stub_warn(void);

static kmutex_t handovermtx;
static kcondvar_t handovercv;
static bool handover;

struct bpf_ops bpf_ops_stub = {
	.bpf_attach = bpf_stub_attach,
	.bpf_detach = bpf_stub_detach,
	.bpf_change_type = (void *)bpf_stub_null,

	.bpf_tap = (void *)bpf_stub_warn,
	.bpf_mtap = (void *)bpf_stub_warn,
	.bpf_mtap2 = (void *)bpf_stub_warn,
	.bpf_mtap_af = (void *)bpf_stub_warn,
	.bpf_mtap_sl_in = (void *)bpf_stub_warn,
	.bpf_mtap_sl_out = (void *)bpf_stub_warn,
};
struct bpf_ops *bpf_ops;

static void
bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
{
	struct laglist *lag;
	bool storeattach = true;

	lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
	lag->lag_ifp = ifp;
	lag->lag_dlt = dlt;
	lag->lag_hlen = hlen;
	lag->lag_drvp = drvp;

	mutex_enter(&handovermtx);
	/*
	 * If handover is in progress, wait for it to finish and complete
	 * attach after that.  Otherwise record ourselves.
	 */
	while (handover) {
		storeattach = false;
		cv_wait(&handovercv, &handovermtx);
	}

	if (storeattach == false) {
		mutex_exit(&handovermtx);
		kmem_free(lag, sizeof(*lag));
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
	} else {
		*drvp = NULL;
		TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
	}
}

static void
bpf_stub_detach(struct ifnet *ifp)
{
	TAILQ_HEAD(, laglist) rmlist;
	struct laglist *lag, *lag_next;
	bool didhand;

	TAILQ_INIT(&rmlist);

	didhand = false;
	mutex_enter(&handovermtx);
	while (handover) {
		didhand = true;
		cv_wait(&handovercv, &handovermtx);
	}

	if (didhand == false) {
		/* atomically remove all entries for this interface */
		for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
			lag_next = TAILQ_NEXT(lag, lag_entries);
			if (lag->lag_ifp == ifp) {
				TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
				TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
			}
		}
		mutex_exit(&handovermtx);
		while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
			TAILQ_REMOVE(&rmlist, lag, lag_entries);
			kmem_free(lag, sizeof(*lag));
		}
	} else {
		mutex_exit(&handovermtx);
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_detach(ifp);
	}
}

static void
bpf_stub_null(void)
{

}

static void
bpf_stub_warn(void)
{

#ifdef DEBUG
	panic("bpf method called without attached bpf_if");
#endif
#ifdef DIAGNOSTIC
	printf("bpf method called without attached bpf_if\n");
#endif
}

void
bpf_setops(void)
{

	mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&handovercv, "bpfops");
	bpf_ops = &bpf_ops_stub;
}

/*
 * Party's over, prepare for handover.
 * It needs to happen *before* bpf_ops is set to make it atomic
 * to callers (see also stub implementations, which wait if
 * called during handover).  The likelihood of seeing a full
 * attach-detach *during* handover comes close to astronomical,
 * but handle it anyway since it's relatively easy.
 */
void
bpf_ops_handover_enter(struct bpf_ops *newops)
{
	struct laglist *lag;

	mutex_enter(&handovermtx);
	handover = true;

	while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
		TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
		newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
		    lag->lag_hlen, lag->lag_drvp);
		kmem_free(lag, sizeof(*lag));
		mutex_enter(&handovermtx);
	}
	mutex_exit(&handovermtx);
}

/* handover done */
void
bpf_ops_handover_exit(void)
{

	mutex_enter(&handovermtx);
	handover = false;
	cv_broadcast(&handovercv);
	mutex_exit(&handovermtx);
}
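
/*
 * Illustrative sketch (not compiled): roughly how the real bpf driver
 * is expected to drive bpf_ops_handover_enter()/_exit() above once it
 * is loaded.  The names bpf_ops_kernel and bpf_setops_kernel are
 * assumptions for this sketch only; everything else used below is
 * declared in <net/bpf.h> or earlier in this file.
 */
#if 0
extern struct bpf_ops bpf_ops_kernel;	/* hypothetical real ops table */

void
bpf_setops_kernel(void)			/* hypothetical helper */
{

	/*
	 * Replay attaches recorded by the stubs against the real ops;
	 * stub calls arriving from here on block until handover ends.
	 */
	bpf_ops_handover_enter(&bpf_ops_kernel);

	/* switch the global ops pointer to the real implementation */
	bpf_ops = &bpf_ops_kernel;

	/* wake anyone who blocked in the stubs during the switch */
	bpf_ops_handover_exit();
}
#endif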