/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF main: dynamic load/initialisation and unload routines.
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf.c,v 1.42 2020/02/07 12:35:33 thorpej Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/percpu.h>
#include <sys/xcall.h>
#endif

#include "npf_impl.h"
#include "npf_conn.h"

static __read_mostly npf_t *	npf_kernel_ctx = NULL;

__dso_public int
npfk_sysinit(unsigned nworkers)
{
	npf_bpf_sysinit();
	npf_tableset_sysinit();
	npf_nat_sysinit();
	return npf_worker_sysinit(nworkers);
}

__dso_public void
npfk_sysfini(void)
{
	npf_worker_sysfini();
	npf_nat_sysfini();
	npf_tableset_sysfini();
	npf_bpf_sysfini();
}

__dso_public npf_t *
npfk_create(int flags, const npf_mbufops_t *mbufops, const npf_ifops_t *ifops)
{
	npf_t *npf;

	npf = kmem_zalloc(sizeof(npf_t), KM_SLEEP);
	npf->ebr = npf_ebr_create();
	npf->stats_percpu = percpu_alloc(NPF_STATS_SIZE);
	npf->mbufops = mbufops;

	npf_param_init(npf);
	npf_state_sysinit(npf);
	npf_ifmap_init(npf, ifops);
	npf_conn_init(npf);
	npf_portmap_init(npf);
	npf_alg_init(npf);
	npf_ext_init(npf);

	/* Load an empty configuration. */
	npf_config_init(npf);

	if ((flags & NPF_NO_GC) == 0) {
		npf_worker_register(npf, npf_conn_worker);
	}
	return npf;
}

__dso_public void
npfk_destroy(npf_t *npf)
{
	/*
	 * Destroy the current configuration.  Note: at this point all
	 * handlers must be deactivated; we will drain any processing.
	 */
	npf_config_fini(npf);

	/* Finally, safe to destroy the subsystems. */
	npf_ext_fini(npf);
	npf_alg_fini(npf);
	npf_portmap_fini(npf);
	npf_conn_fini(npf);
	npf_ifmap_fini(npf);
	npf_state_sysfini(npf);
	npf_param_fini(npf);

	npf_ebr_destroy(npf->ebr);
	percpu_free(npf->stats_percpu, NPF_STATS_SIZE);
	kmem_free(npf, sizeof(npf_t));
}

__dso_public int
npfk_load(npf_t *npf, void *config_ref, npf_error_t *err)
{
	return npfctl_load(npf, 0, config_ref);
}

__dso_public void
npfk_gc(npf_t *npf)
{
	npf_conn_worker(npf);
}

__dso_public void
npfk_thread_register(npf_t *npf)
{
	npf_ebr_register(npf->ebr);
}

__dso_public void
npfk_thread_unregister(npf_t *npf)
{
	npf_ebr_full_sync(npf->ebr);
	npf_ebr_unregister(npf->ebr);
}

void
npf_setkernctx(npf_t *npf)
{
	npf_kernel_ctx = npf;
}

npf_t *
npf_getkernctx(void)
{
	return npf_kernel_ctx;
}

/*
 * NPF statistics interface.
 */

void
npf_stats_inc(npf_t *npf, npf_stats_t st)
{
	uint64_t *stats = percpu_getref(npf->stats_percpu);
	stats[st]++;
	percpu_putref(npf->stats_percpu);
}

void
npf_stats_dec(npf_t *npf, npf_stats_t st)
{
	uint64_t *stats = percpu_getref(npf->stats_percpu);
	stats[st]--;
	percpu_putref(npf->stats_percpu);
}

static void
npf_stats_collect(void *mem, void *arg, struct cpu_info *ci)
{
	uint64_t *percpu_stats = mem, *full_stats = arg;

	for (unsigned i = 0; i < NPF_STATS_COUNT; i++) {
		full_stats[i] += percpu_stats[i];
	}
}

static void
npf_stats_clear_cb(void *mem, void *arg, struct cpu_info *ci)
{
	uint64_t *percpu_stats = mem;

	for (unsigned i = 0; i < NPF_STATS_COUNT; i++) {
		percpu_stats[i] = 0;
	}
}

/*
 * npf_stats: export collected statistics.
 */

__dso_public void
npfk_stats(npf_t *npf, uint64_t *buf)
{
	memset(buf, 0, NPF_STATS_SIZE);
	percpu_foreach_xcall(npf->stats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET),
	    npf_stats_collect, buf);
}

__dso_public void
npfk_stats_clear(npf_t *npf)
{
	percpu_foreach_xcall(npf->stats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET),
	    npf_stats_clear_cb, NULL);
}
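
#if 0
/*
 * Illustrative sketch only (compiled out): a minimal life cycle of the
 * standalone npfk_*() interface exported above, assuming a libnpfkern-style
 * environment.  The operation tables "example_mbufops"/"example_ifops", the
 * configuration reference "config_ref" and this function name itself are
 * hypothetical placeholders a caller would have to supply; everything else
 * uses only the functions defined in this file.
 */
static int
npfk_usage_sketch(const npf_mbufops_t *example_mbufops,
    const npf_ifops_t *example_ifops, void *config_ref)
{
	npf_t *npf;
	npf_error_t err;
	uint64_t stats[NPF_STATS_COUNT];
	int error;

	/* Initialise the global subsystems with one worker thread. */
	error = npfk_sysinit(1);
	if (error)
		return error;

	/*
	 * Create an instance.  With NPF_NO_GC no connection G/C worker is
	 * registered, so the caller drives G/C explicitly via npfk_gc().
	 */
	npf = npfk_create(NPF_NO_GC, example_mbufops, example_ifops);

	/* Each thread calling into NPF registers with EBR first. */
	npfk_thread_register(npf);

	/* Load a configuration reference. */
	error = npfk_load(npf, config_ref, &err);

	/* ... packet processing would happen here ... */
	npfk_gc(npf);

	/* Collect, then reset, the per-CPU statistics counters. */
	npfk_stats(npf, stats);
	npfk_stats_clear(npf);

	/* Tear down in reverse order. */
	npfk_thread_unregister(npf);
	npfk_destroy(npf);
	npfk_sysfini();
	return error;
}
#endif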