/*	$NetBSD: bpf_stub.c,v 1.4 2010/01/25 22:18:17 pooka Exp $	*/

/*
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.4 2010/01/25 22:18:17 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <net/bpf.h>

/*
 * An attach request that arrives while only the stub is loaded is
 * recorded on a list and replayed once the real bpf ops take over.
 */
struct laglist {
	struct ifnet *lag_ifp;
	u_int lag_dlt;
	u_int lag_hlen;
	struct bpf_if **lag_drvp;

	TAILQ_ENTRY(laglist) lag_entries;
};

static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);

static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
static void bpf_stub_detach(struct ifnet *);

static void bpf_stub_null(void);
static void bpf_stub_warn(void);

static kmutex_t handovermtx;
static kcondvar_t handovercv;
static bool handover;

/*
 * Stub ops: attach/detach requests are recorded for later replay, type
 * changes are silently ignored, and the tap routines complain, since no
 * bpf_if can be attached yet.
 */
struct bpf_ops bpf_ops_stub = {
	.bpf_attach =		bpf_stub_attach,
	.bpf_detach =		bpf_stub_detach,
	.bpf_change_type =	(void *)bpf_stub_null,

	.bpf_tap =		(void *)bpf_stub_warn,
	.bpf_mtap =		(void *)bpf_stub_warn,
	.bpf_mtap2 =		(void *)bpf_stub_warn,
	.bpf_mtap_af =		(void *)bpf_stub_warn,
	.bpf_mtap_et =		(void *)bpf_stub_warn,
	.bpf_mtap_sl_in =	(void *)bpf_stub_warn,
	.bpf_mtap_sl_out =	(void *)bpf_stub_warn,
};
struct bpf_ops *bpf_ops;

static void
bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
{
	struct laglist *lag;
	bool storeattach = true;

	lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
	lag->lag_ifp = ifp;
	lag->lag_dlt = dlt;
	lag->lag_hlen = hlen;
	lag->lag_drvp = drvp;

	mutex_enter(&handovermtx);
	/*
	 * If a handover is in progress, wait for it to finish and complete
	 * the attach after that.  Otherwise record ourselves.
	 */
	while (handover) {
		storeattach = false;
		cv_wait(&handovercv, &handovermtx);
	}

	if (storeattach == false) {
		mutex_exit(&handovermtx);
		kmem_free(lag, sizeof(*lag));
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
	} else {
		*drvp = NULL;
		TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
	}
}

static void
bpf_stub_detach(struct ifnet *ifp)
{
	TAILQ_HEAD(, laglist) rmlist;
	struct laglist *lag, *lag_next;
	bool didhand;

	TAILQ_INIT(&rmlist);

	didhand = false;
	mutex_enter(&handovermtx);
	while (handover) {
		didhand = true;
		cv_wait(&handovercv, &handovermtx);
	}

	if (didhand == false) {
		/* atomically remove all entries for this interface */
		for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
			lag_next = TAILQ_NEXT(lag, lag_entries);
			if (lag->lag_ifp == ifp) {
				TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
				TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
			}
		}
		mutex_exit(&handovermtx);
		while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
			TAILQ_REMOVE(&rmlist, lag, lag_entries);
			kmem_free(lag, sizeof(*lag));
		}
	} else {
		mutex_exit(&handovermtx);
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_detach(ifp);
	}
}

static void
bpf_stub_null(void)
{

}

static void
bpf_stub_warn(void)
{

#ifdef DEBUG
	panic("bpf method called without attached bpf_if");
#endif
#ifdef DIAGNOSTIC
	printf("bpf method called without attached bpf_if\n");
#endif
}

void
bpf_setops(void)
{

	mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&handovercv, "bpfops");
	bpf_ops = &bpf_ops_stub;
}

/*
 * Party's over, prepare for handover.
 * It needs to happen *before* bpf_ops is set to make it atomic
 * to callers (see also the stub implementations, which wait if
 * called during handover).  The likelihood of seeing a full
 * attach-detach *during* handover comes close to astronomical,
 * but handle it anyway since it's relatively easy.
 */
void
bpf_ops_handover_enter(struct bpf_ops *newops)
{
	struct laglist *lag;

	mutex_enter(&handovermtx);
	handover = true;

	while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
		TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
		newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
		    lag->lag_hlen, lag->lag_drvp);
		kmem_free(lag, sizeof(*lag));
		mutex_enter(&handovermtx);
	}
	mutex_exit(&handovermtx);
}

/* hangover done */
void
bpf_ops_handover_exit(void)
{

	mutex_enter(&handovermtx);
	handover = false;
	cv_broadcast(&handovercv);
	mutex_exit(&handovermtx);
}
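
/*
 * Illustrative sketch only, not part of the original file: the expected
 * life cycle of these hooks as seen from this file's interface.  The
 * name "bpf_ops_real" is a placeholder for whatever ops structure the
 * real bpf implementation exports, and the exact installation sequence
 * used by that code may differ (e.g. an atomic pointer store).
 *
 *	bpf_setops();				install stub ops at boot
 *	...
 *	bpf_ops_handover_enter(&bpf_ops_real);	replay recorded attaches
 *	bpf_ops = &bpf_ops_real;		publish the real ops
 *	bpf_ops_handover_exit();		wake threads waiting in stubs
 */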