/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF main: dynamic load/initialisation and unload routines.
 */
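
/*
 * Illustrative only: a minimal sketch of how a standalone consumer of the
 * npfk_*() interface (e.g. one built on libnpfkern) might drive the
 * lifecycle implemented in this file.  The my_mbufops/my_ifops operation
 * vectors and the worker count are hypothetical, caller-provided values,
 * not something defined by NPF itself:
 *
 *	extern const npf_mbufops_t my_mbufops;	// caller-provided mbuf ops
 *	extern const npf_ifops_t my_ifops;	// caller-provided interface ops
 *
 *	npfk_sysinit(1);			// once: global subsystems + workers
 *	npf_t *npf = npfk_create(0, &my_mbufops, &my_ifops, NULL);
 *	// ... load a configuration with npfk_load(), process packets ...
 *	npfk_destroy(npf);			// tear the instance down
 *	npfk_sysfini();				// once: global teardown
 */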

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf.c,v 1.43 2020/05/30 14:16:56 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/percpu.h>
#include <sys/xcall.h>
#endif

#include "npf_impl.h"
#include "npf_conn.h"

static __read_mostly npf_t *	npf_kernel_ctx = NULL;

/*
 * npfk_sysinit: initialise the global NPF subsystems (BPF, tableset and
 * NAT state) and set up the requested number of worker threads.
 */
__dso_public int
npfk_sysinit(unsigned nworkers)
{
	npf_bpf_sysinit();
	npf_tableset_sysinit();
	npf_nat_sysinit();
	return npf_worker_sysinit(nworkers);
}

/*
 * npfk_sysfini: undo npfk_sysinit, tearing the global state down in
 * the reverse order.
 */
__dso_public void
npfk_sysfini(void)
{
	npf_worker_sysfini();
	npf_nat_sysfini();
	npf_tableset_sysfini();
	npf_bpf_sysfini();
}

/*
 * npfk_create: construct a new NPF instance: allocate the context,
 * initialise the per-instance subsystems, load an empty configuration
 * and, unless the NPF_NO_GC flag is given, enlist the instance with
 * the worker thread(s) for garbage collection.
 */
__dso_public npf_t *
npfk_create(int flags, const npf_mbufops_t *mbufops,
    const npf_ifops_t *ifops, void *arg)
{
	npf_t *npf;

	npf = kmem_zalloc(sizeof(npf_t), KM_SLEEP);
	npf->ebr = npf_ebr_create();
	npf->stats_percpu = percpu_alloc(NPF_STATS_SIZE);
	npf->mbufops = mbufops;
	npf->arg = arg;

	npf_param_init(npf);
	npf_state_sysinit(npf);
	npf_ifmap_init(npf, ifops);
	npf_conn_init(npf);
	npf_portmap_init(npf);
	npf_alg_init(npf);
	npf_ext_init(npf);

	/* Load an empty configuration. */
	npf_config_init(npf);

	if ((flags & NPF_NO_GC) == 0) {
		npf_worker_enlist(npf);
	}
	return npf;
}

/*
 * npfk_destroy: destroy an NPF instance created with npfk_create().
 */
__dso_public void
npfk_destroy(npf_t *npf)
{
	npf_worker_discharge(npf);

	/*
	 * Destroy the current configuration.  Note: at this point all
	 * handlers must be deactivated; we will drain any processing.
	 */
	npf_config_fini(npf);

	/* Finally, safe to destroy the subsystems. */
	npf_ext_fini(npf);
	npf_alg_fini(npf);
	npf_portmap_fini(npf);
	npf_conn_fini(npf);
	npf_ifmap_fini(npf);
	npf_state_sysfini(npf);
	npf_param_fini(npf);

	npf_ebr_destroy(npf->ebr);
	percpu_free(npf->stats_percpu, NPF_STATS_SIZE);
	kmem_free(npf, sizeof(npf_t));
}

/*
 * npfk_load: (re)load the configuration.
 *
 * => Will not modify the configuration reference.
 */
__dso_public int
npfk_load(npf_t *npf, const void *config_ref, npf_error_t *err)
{
	const nvlist_t *req = (const nvlist_t *)config_ref;
	nvlist_t *resp;
	int error;

	resp = nvlist_create(0);
	error = npfctl_run_op(npf, IOC_NPF_LOAD, req, resp);
	nvlist_destroy(resp);

	return error;
}
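
/*
 * A usage sketch only: the configuration reference is expected to be a
 * libnv nvlist in the form that npfctl(8) would submit via the
 * IOC_NPF_LOAD operation; how the caller builds or obtains it (e.g. via
 * libnpf) is outside the scope of this file, so "config_ref" below is
 * assumed to already exist:
 *
 *	npf_error_t errinfo;
 *	int error;
 *
 *	memset(&errinfo, 0, sizeof(errinfo));
 *	error = npfk_load(npf, config_ref, &errinfo);
 *	if (error) {
 *		// handle the failure, inspecting the error code
 *	}
 */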

/*
 * npfk_gc: perform a single connection garbage collection pass.  Meant
 * for callers which run the G/C themselves (e.g. instances created
 * with the NPF_NO_GC flag).
 */
__dso_public void
npfk_gc(npf_t *npf)
{
	npf_conn_worker(npf);
}

/*
 * npfk_thread_register: register the current thread with the
 * epoch-based reclamation (EBR) mechanism of this NPF instance.
 */
__dso_public void
npfk_thread_register(npf_t *npf)
{
	npf_ebr_register(npf->ebr);
}

/*
 * npfk_thread_unregister: wait for a full EBR synchronisation, then
 * unregister the current thread.
 */
__dso_public void
npfk_thread_unregister(npf_t *npf)
{
	npf_ebr_full_sync(npf->ebr);
	npf_ebr_unregister(npf->ebr);
}
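
/*
 * A sketch of a caller-managed G/C thread for an instance created with
 * the NPF_NO_GC flag (i.e. not enlisted with the built-in worker).  The
 * loop structure, the "shutting_down" flag and the sleep interval are
 * hypothetical, caller-side details:
 *
 *	static void
 *	my_gc_thread(void *arg)
 *	{
 *		npf_t *npf = arg;
 *
 *		npfk_thread_register(npf);	// join the EBR domain
 *		while (!shutting_down) {
 *			npfk_gc(npf);		// one connection G/C pass
 *			// sleep for the desired interval
 *		}
 *		npfk_thread_unregister(npf);	// sync and leave the EBR domain
 *	}
 */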

/*
 * npfk_getarg: return the opaque argument passed to npfk_create().
 */
__dso_public void *
npfk_getarg(npf_t *npf)
{
	return npf->arg;
}

/*
 * npf_setkernctx: set the kernel-wide NPF context.
 */
void
npf_setkernctx(npf_t *npf)
{
	npf_kernel_ctx = npf;
}

/*
 * npf_getkernctx: get the kernel-wide NPF context.
 */
npf_t *
npf_getkernctx(void)
{
	return npf_kernel_ctx;
}

/*
 * NPF statistics interface.
 */

void
npf_stats_inc(npf_t *npf, npf_stats_t st)
{
	uint64_t *stats = percpu_getref(npf->stats_percpu);
	stats[st]++;
	percpu_putref(npf->stats_percpu);
}

void
npf_stats_dec(npf_t *npf, npf_stats_t st)
{
	uint64_t *stats = percpu_getref(npf->stats_percpu);
	stats[st]--;
	percpu_putref(npf->stats_percpu);
}

/*
 * npf_stats_collect: per-CPU xcall callback which adds one CPU's
 * counters into the full statistics buffer.
 */
static void
npf_stats_collect(void *mem, void *arg, struct cpu_info *ci)
{
	uint64_t *percpu_stats = mem, *full_stats = arg;

	for (unsigned i = 0; i < NPF_STATS_COUNT; i++) {
		full_stats[i] += percpu_stats[i];
	}
}

/*
 * npf_stats_clear_cb: per-CPU xcall callback which zeroes one CPU's
 * counters.
 */
static void
npf_stats_clear_cb(void *mem, void *arg, struct cpu_info *ci)
{
	uint64_t *percpu_stats = mem;

	for (unsigned i = 0; i < NPF_STATS_COUNT; i++) {
		percpu_stats[i] = 0;
	}
}

/*
 * npfk_stats: export the collected statistics, summing the per-CPU
 * counters into the caller-supplied buffer of NPF_STATS_SIZE bytes.
 */
__dso_public void
npfk_stats(npf_t *npf, uint64_t *buf)
{
	memset(buf, 0, NPF_STATS_SIZE);
	percpu_foreach_xcall(npf->stats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET),
	    npf_stats_collect, buf);
}
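
/*
 * A reading sketch: the caller supplies a buffer of NPF_STATS_SIZE bytes
 * (i.e. NPF_STATS_COUNT 64-bit counters), which npfk_stats() zeroes and
 * then fills with the summed per-CPU values:
 *
 *	uint64_t *stats = kmem_zalloc(NPF_STATS_SIZE, KM_SLEEP);
 *
 *	npfk_stats(npf, stats);
 *	for (unsigned i = 0; i < NPF_STATS_COUNT; i++) {
 *		printf("stat %u = %" PRIu64 "\n", i, stats[i]);
 *	}
 *	kmem_free(stats, NPF_STATS_SIZE);
 */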

/*
 * npfk_stats_clear: clear the statistics counters on all CPUs.
 */
__dso_public void
npfk_stats_clear(npf_t *npf)
{
	percpu_foreach_xcall(npf->stats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET),
	    npf_stats_clear_cb, NULL);
}
239