/*-
 * Copyright (c) 2009-2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF main: dynamic load/initialisation and unload routines.
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_os.c,v 1.11 2019/02/27 21:37:24 mrg Exp $");

#ifdef _KERNEL_OPT
#include "pf.h"
#if NPF > 0
#error "NPF and PF are mutually exclusive; please select one"
#endif
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/module.h>
#include <sys/socketvar.h>
#include <sys/uio.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#endif

#include "npf_impl.h"
#include "npfkern.h"

#ifdef _KERNEL
#ifndef _MODULE
#include "opt_modular.h"
#include "opt_net_mpsafe.h"
#endif
#include "ioconf.h"
#endif

/*
 * Module and device structures.
 */
#ifndef _MODULE
/*
 * Modular kernels load drivers too early, and we need percpu(9) to be
 * initialised first, so make this a misc-class module; a better way
 * would be to have early-boot and late-boot driver classes.
 */
MODULE(MODULE_CLASS_MISC, npf, "bpf");
#else
/* This module autoloads via /dev/npf, so it needs to be a driver. */
MODULE(MODULE_CLASS_DRIVER, npf, "bpf");
#endif

static int	npf_dev_open(dev_t, int, int, lwp_t *);
static int	npf_dev_close(dev_t, int, int, lwp_t *);
static int	npf_dev_ioctl(dev_t, u_long, void *, int, lwp_t *);
static int	npf_dev_poll(dev_t, int, lwp_t *);
static int	npf_dev_read(dev_t, struct uio *, int);

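/*
 * Control device (/dev/npf) switch: the ioctl(2) interface used by
 * npfctl(8) to configure and query NPF.
 */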
const struct cdevsw npf_cdevsw = {
	.d_open = npf_dev_open,
	.d_close = npf_dev_close,
	.d_read = npf_dev_read,
	.d_write = nowrite,
	.d_ioctl = npf_dev_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = npf_dev_poll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static const char *	npf_ifop_getname(ifnet_t *);
static ifnet_t *	npf_ifop_lookup(const char *);
static void		npf_ifop_flush(void *);
static void *		npf_ifop_getmeta(const ifnet_t *);
static void		npf_ifop_setmeta(ifnet_t *, void *);

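/* Number of NPF worker threads to start via npf_sysinit(). */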
static const unsigned	nworkers = 1;

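/* pfil(9) hooks: registration state and attachment points. */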
static bool		pfil_registered = false;
static pfil_head_t *	npf_ph_if = NULL;
static pfil_head_t *	npf_ph_inet = NULL;
static pfil_head_t *	npf_ph_inet6 = NULL;

static const npf_ifops_t kern_ifops = {
	.getname	= npf_ifop_getname,
	.lookup		= npf_ifop_lookup,
	.flush		= npf_ifop_flush,
	.getmeta	= npf_ifop_getmeta,
	.setmeta	= npf_ifop_setmeta,
};

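/*
 * npf_fini: unload path -- detach the control device (when built as a
 * module), unregister the pfil(9) hooks and destroy the kernel NPF context.
 */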
static int
npf_fini(void)
{
	npf_t *npf = npf_getkernctx();

	/* First, detach the device and remove the pfil hooks. */
#ifdef _MODULE
	devsw_detach(NULL, &npf_cdevsw);
#endif
	npf_pfil_unregister(true);
	npf_destroy(npf);
	npf_sysfini();
	return 0;
}

#if 1
/*
 * When npf_init() is static and inlined into npf_modcmd() directly (either
 * by hand or by GCC 7), GCC 7 on 32-bit sparc does something wrong and the
 * CPUs hang up.  Making it not static works for some reason.
 *
 * Revert this when the real problem is found.
 */
int npf_init(void);
int
#else
static int
#endif
npf_init(void)
{
	npf_t *npf;
	int error = 0;

	error = npf_sysinit(nworkers);
	if (error)
		return error;
	npf = npf_create(0, NULL, &kern_ifops);
	npf_setkernctx(npf);
	npf_pfil_register(true);

#ifdef _MODULE
	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;

	/* Attach /dev/npf device. */
	error = devsw_attach("npf", NULL, &bmajor, &npf_cdevsw, &cmajor);
	if (error) {
		/* npf_fini() will call devsw_detach(), which is safe here. */
		(void)npf_fini();
	}
#endif
	return error;
}


/*
 * Module interface.
 */
static int
npf_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
		return npf_init();
	case MODULE_CMD_FINI:
		return npf_fini();
	case MODULE_CMD_AUTOUNLOAD:
		if (npf_autounload_p()) {
			return EBUSY;
		}
		break;
	default:
		return ENOTTY;
	}
	return 0;
}

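/*
 * npfattach: pseudo-device attach routine.  Nothing to do here; the real
 * initialisation is driven by the module framework via npf_modcmd().
 */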
void
npfattach(int nunits)
{
	/* Nothing */
}

static int
npf_dev_open(dev_t dev, int flag, int mode, lwp_t *l)
{
	/* Available only for super-user. */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
		return EPERM;
	}
	return 0;
}

static int
npf_dev_close(dev_t dev, int flag, int mode, lwp_t *l)
{
	return 0;
}

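/*
 * npf_stats_export: copy the full statistics counters out to the user
 * buffer supplied with the IOC_NPF_STATS ioctl.
 */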
static int
npf_stats_export(npf_t *npf, void *data)
{
	uint64_t *fullst, *uptr = *(uint64_t **)data;
	int error;

	fullst = kmem_alloc(NPF_STATS_SIZE, KM_SLEEP);
	npf_stats(npf, fullst); /* will zero the buffer */
	error = copyout(fullst, uptr, NPF_STATS_SIZE);
	kmem_free(fullst, NPF_STATS_SIZE);
	return error;
}

static int
npf_dev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	npf_t *npf = npf_getkernctx();
	int error;

	/* Available only for super-user. */
	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
		return EPERM;
	}

	switch (cmd) {
	case IOC_NPF_TABLE:
		error = npfctl_table(npf, data);
		break;
	case IOC_NPF_RULE:
		error = npfctl_rule(npf, cmd, data);
		break;
	case IOC_NPF_STATS:
		error = npf_stats_export(npf, data);
		break;
	case IOC_NPF_SAVE:
		error = npfctl_save(npf, cmd, data);
		break;
	case IOC_NPF_SWITCH:
		error = npfctl_switch(data);
		break;
	case IOC_NPF_LOAD:
		error = npfctl_load(npf, cmd, data);
		break;
	case IOC_NPF_CONN_LOOKUP:
		error = npfctl_conn_lookup(npf, cmd, data);
		break;
	case IOC_NPF_VERSION:
		*(int *)data = NPF_VERSION;
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	return error;
}
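/*
 * Illustrative sketch (not compiled here): how a userland program would
 * typically query the NPF ABI version through the control device.  The
 * header path and error handling are assumptions, not part of this file;
 * npfctl(8) performs an equivalent check.
 *
 *	#include <sys/ioctl.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <net/npf.h>
 *
 *	int fd = open("/dev/npf", O_RDONLY), ver = 0;
 *	if (fd == -1 || ioctl(fd, IOC_NPF_VERSION, &ver) == -1)
 *		err(EXIT_FAILURE, "/dev/npf");
 *	if (ver != NPF_VERSION)
 *		errx(EXIT_FAILURE, "kernel/userland NPF version mismatch");
 */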

static int
npf_dev_poll(dev_t dev, int events, lwp_t *l)
{
	return ENOTSUP;
}

static int
npf_dev_read(dev_t dev, struct uio *uio, int flag)
{
	return ENOTSUP;
}

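/*
 * npf_autounload_p: true when the pfil(9) hooks are not registered and
 * the default ruleset policy is to pass; used for the module auto-unload
 * decision in npf_modcmd().
 */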
bool
npf_autounload_p(void)
{
	npf_t *npf = npf_getkernctx();
	return !npf_pfil_registered_p() && npf_default_pass(npf);
}

/*
 * Interface operations.
 */

static const char *
npf_ifop_getname(ifnet_t *ifp)
{
	return ifp->if_xname;
}

static ifnet_t *
npf_ifop_lookup(const char *name)
{
	return ifunit(name);
}

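/*
 * npf_ifop_flush: set the NPF metadata pointer on every interface in the
 * system (the argument is typically NULL, clearing the bindings).
 */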
static void
npf_ifop_flush(void *arg)
{
	ifnet_t *ifp;

	KERNEL_LOCK(1, NULL);
	IFNET_GLOBAL_LOCK();
	IFNET_WRITER_FOREACH(ifp) {
		ifp->if_pf_kif = arg;
	}
	IFNET_GLOBAL_UNLOCK();
	KERNEL_UNLOCK_ONE(NULL);
}

static void *
npf_ifop_getmeta(const ifnet_t *ifp)
{
	return ifp->if_pf_kif;
}

static void
npf_ifop_setmeta(ifnet_t *ifp, void *arg)
{
	ifp->if_pf_kif = arg;
}

#ifdef _KERNEL

/*
 * Wrapper of the main packet handler to pass the kernel NPF context.
 */
static int
npfkern_packet_handler(void *arg, struct mbuf **mp, ifnet_t *ifp, int di)
{
	npf_t *npf = npf_getkernctx();
	return npf_packet_handler(npf, mp, ifp, di);
}

/*
 * npf_ifhook: hook handling interface changes.
 */
static void
npf_ifhook(void *arg, unsigned long cmd, void *arg2)
{
	npf_t *npf = npf_getkernctx();
	ifnet_t *ifp = arg2;

	switch (cmd) {
	case PFIL_IFNET_ATTACH:
		npf_ifmap_attach(npf, ifp);
		npf_ifaddr_sync(npf, ifp);
		break;
	case PFIL_IFNET_DETACH:
		npf_ifmap_detach(npf, ifp);
		npf_ifaddr_flush(npf, ifp);
		break;
	}
}

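/*
 * npf_ifaddrhook: hook handling interface address changes; re-sync the
 * NPF interface address tables on any address add/change/delete.
 */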
static void
npf_ifaddrhook(void *arg, u_long cmd, void *arg2)
{
	npf_t *npf = npf_getkernctx();
	struct ifaddr *ifa = arg2;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCDIFADDR:
#ifdef INET6
	case SIOCSIFADDR_IN6:
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6:
#endif
		break;
	default:
		return;
	}
	npf_ifaddr_sync(npf, ifa->ifa_ifp);
}

/*
 * npf_pfil_register: register pfil(9) hooks.
 */
int
npf_pfil_register(bool init)
{
	npf_t *npf = npf_getkernctx();
	int error = 0;

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

	/* Init: interface re-config and attach/detach hook. */
	if (!npf_ph_if) {
		npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0);
		if (!npf_ph_if) {
			error = ENOENT;
			goto out;
		}

		error = pfil_add_ihook(npf_ifhook, NULL,
		    PFIL_IFNET, npf_ph_if);
		KASSERT(error == 0);

		error = pfil_add_ihook(npf_ifaddrhook, NULL,
		    PFIL_IFADDR, npf_ph_if);
		KASSERT(error == 0);
	}
	if (init) {
		goto out;
	}

	/* Bail out if the pfil hooks are already registered. */
	if (pfil_registered) {
		error = EEXIST;
		goto out;
	}

	/* Capture points of the activity in the IP layer. */
	npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
	npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
	if (!npf_ph_inet && !npf_ph_inet6) {
		error = ENOENT;
		goto out;
	}

	/* Packet IN/OUT handlers for IP layer. */
	if (npf_ph_inet) {
		error = pfil_add_hook(npfkern_packet_handler, npf,
		    PFIL_ALL, npf_ph_inet);
		KASSERT(error == 0);
	}
	if (npf_ph_inet6) {
		error = pfil_add_hook(npfkern_packet_handler, npf,
		    PFIL_ALL, npf_ph_inet6);
		KASSERT(error == 0);
	}

	/*
	 * Re-sync all interface address tables, since we were not
	 * listening for any changes while the hooks were unregistered.
	 */
	npf_ifaddr_syncall(npf);
	pfil_registered = true;
out:
	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return error;
}

/*
 * npf_pfil_unregister: unregister pfil(9) hooks.
 */
void
npf_pfil_unregister(bool fini)
{
	npf_t *npf = npf_getkernctx();

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

	if (fini && npf_ph_if) {
		(void)pfil_remove_ihook(npf_ifhook, NULL,
		    PFIL_IFNET, npf_ph_if);
		(void)pfil_remove_ihook(npf_ifaddrhook, NULL,
		    PFIL_IFADDR, npf_ph_if);
	}
	if (npf_ph_inet) {
		(void)pfil_remove_hook(npfkern_packet_handler, npf,
		    PFIL_ALL, npf_ph_inet);
	}
	if (npf_ph_inet6) {
		(void)pfil_remove_hook(npfkern_packet_handler, npf,
		    PFIL_ALL, npf_ph_inet6);
	}
	pfil_registered = false;

	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

bool
npf_pfil_registered_p(void)
{
	return pfil_registered;
}
#endif