xref: /netbsd-src/sys/net/npf/npf_os.c (revision 181254a7b1bdde6873432bffef2d2decc4b5c22f)
1 /*-
2  * Copyright (c) 2009-2016 The NetBSD Foundation, Inc.
3  * All rights reserved.
4  *
5  * This material is based upon work partially supported by The
6  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  * POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * NPF main: dynamic load/initialisation and unload routines.
32  */
33 
34 #ifdef _KERNEL
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: npf_os.c,v 1.19 2020/08/18 07:53:24 maxv Exp $");
37 
38 #ifdef _KERNEL_OPT
39 #include "pf.h"
40 #if NPF > 0
41 #error "NPF and PF are mutually exclusive; please select one"
42 #endif
43 #endif
44 
45 #include <sys/param.h>
46 #include <sys/types.h>
47 
48 #include <sys/conf.h>
49 #include <sys/kauth.h>
50 #include <sys/kmem.h>
51 #include <sys/lwp.h>
52 #include <sys/module.h>
53 #include <sys/pserialize.h>
54 #include <sys/socketvar.h>
55 #include <sys/uio.h>
56 
57 #include <netinet/in.h>
58 #include <netinet6/in6_var.h>
59 #endif
60 
61 #include "npf_impl.h"
62 #include "npfkern.h"
63 
64 #ifdef _KERNEL
65 #ifndef _MODULE
66 #include "opt_modular.h"
67 #include "opt_net_mpsafe.h"
68 #endif
69 #include "ioconf.h"
70 #endif
71 
72 /*
73  * Module and device structures.
74  */
75 #ifndef _MODULE
76 /*
77  * Modular kernels load drivers too early, and we need percpu to be inited
78  * So we make this misc; a better way would be to have early boot and late
79  * boot drivers.
80  */
81 MODULE(MODULE_CLASS_MISC, npf, "bpf");
82 #else
83 /* This module autoloads via /dev/npf so it needs to be a driver */
84 MODULE(MODULE_CLASS_DRIVER, npf, "bpf");
85 #endif
86 
87 #define	NPF_IOCTL_DATA_LIMIT	(4 * 1024 * 1024)
88 
89 static int	npf_pfil_register(bool);
90 static void	npf_pfil_unregister(bool);
91 
92 static int	npf_dev_open(dev_t, int, int, lwp_t *);
93 static int	npf_dev_close(dev_t, int, int, lwp_t *);
94 static int	npf_dev_ioctl(dev_t, u_long, void *, int, lwp_t *);
95 static int	npf_dev_poll(dev_t, int, lwp_t *);
96 static int	npf_dev_read(dev_t, struct uio *, int);
97 
/*
 * Character device switch for /dev/npf: control-plane only.  Read and
 * poll return ENOTSUP (see handlers below); write/mmap/kqfilter are
 * rejected via the no* stubs.  Marked D_MPSAFE: no kernel-lock needed.
 */
98 const struct cdevsw npf_cdevsw = {
99 	.d_open = npf_dev_open,
100 	.d_close = npf_dev_close,
101 	.d_read = npf_dev_read,
102 	.d_write = nowrite,
103 	.d_ioctl = npf_dev_ioctl,
104 	.d_stop = nostop,
105 	.d_tty = notty,
106 	.d_poll = npf_dev_poll,
107 	.d_mmap = nommap,
108 	.d_kqfilter = nokqfilter,
109 	.d_discard = nodiscard,
110 	.d_flag = D_OTHER | D_MPSAFE
111 };
112 
113 static const char *	npf_ifop_getname(npf_t *, ifnet_t *);
114 static ifnet_t *	npf_ifop_lookup(npf_t *, const char *);
115 static void		npf_ifop_flush(npf_t *, void *);
116 static void *		npf_ifop_getmeta(npf_t *, const ifnet_t *);
117 static void		npf_ifop_setmeta(npf_t *, ifnet_t *, void *);
118 
119 static const unsigned	nworkers = 1;
120 
121 static bool		pfil_registered = false;
122 static pfil_head_t *	npf_ph_if = NULL;
123 static pfil_head_t *	npf_ph_inet = NULL;
124 static pfil_head_t *	npf_ph_inet6 = NULL;
125 
/*
 * Interface operations vector passed to npfk_create(): adapts the NPF
 * abstract interface API to the native ifnet(9) structures (name
 * lookup and the per-interface NPF metadata pointer).
 */
126 static const npf_ifops_t kern_ifops = {
127 	.getname	= npf_ifop_getname,
128 	.lookup		= npf_ifop_lookup,
129 	.flush		= npf_ifop_flush,
130 	.getmeta	= npf_ifop_getmeta,
131 	.setmeta	= npf_ifop_setmeta,
132 };
133 
/*
 * npf_fini: unload NPF.  Teardown order matters: the device and all
 * pfil(9) hooks must be removed before the kernel NPF context is
 * destroyed, so no packets or ioctls can arrive mid-destruction.
 * Always returns 0.
 */
134 static int
135 npf_fini(void)
136 {
137 	npf_t *npf = npf_getkernctx();
138 
139 	/* At first, detach device and remove pfil hooks. */
140 #ifdef _MODULE
141 	devsw_detach(NULL, &npf_cdevsw);
142 #endif
143 	npf_pfil_unregister(true);
144 	npfk_destroy(npf);
145 	npfk_sysfini();
146 	return 0;
147 }
148 
/*
 * npf_init: load NPF.  Initialises the kernel subsystem, creates the
 * kernel NPF context, registers the interface-event pfil(9) hooks
 * (packet hooks are added later, on the "switch on" ioctl) and, for
 * modular kernels, attaches the /dev/npf device.
 */
149 static int
150 npf_init(void)
151 {
152 	npf_t *npf;
153 	int error = 0;
154 
155 	error = npfk_sysinit(nworkers);
156 	if (error)
157 		return error;
158 	npf = npfk_create(0, NULL, &kern_ifops, NULL);
159 	npf_setkernctx(npf);
	/* init=true: only the interface hooks; cannot fail fatally here. */
160 	npf_pfil_register(true);
161 
162 #ifdef _MODULE
163 	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
164 
165 	/* Attach /dev/npf device. */
166 	error = devsw_attach("npf", NULL, &bmajor, &npf_cdevsw, &cmajor);
167 	if (error) {
168 		/* It will call devsw_detach(), which is safe. */
169 		(void)npf_fini();
170 	}
171 #endif
172 	return error;
173 }
174 
175 
176 /*
177  * Module interface.
178  */
179 static int
180 npf_modcmd(modcmd_t cmd, void *arg)
181 {
182 	switch (cmd) {
183 	case MODULE_CMD_INIT:
184 		return npf_init();
185 	case MODULE_CMD_FINI:
186 		return npf_fini();
187 	case MODULE_CMD_AUTOUNLOAD:
188 		if (npf_autounload_p()) {
189 			return EBUSY;
190 		}
191 		break;
192 	default:
193 		return ENOTTY;
194 	}
195 	return 0;
196 }
197 
/*
 * npfattach: pseudo-device attach routine required by config(9).
 * Intentionally empty: all real setup happens in npf_init().
 */
void
npfattach(int nunits)
{
	/* Void. */
}
203 
204 static int
205 npf_dev_open(dev_t dev, int flag, int mode, lwp_t *l)
206 {
207 	/* Available only for super-user. */
208 	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
209 	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
210 		return EPERM;
211 	}
212 	return 0;
213 }
214 
215 static int
216 npf_dev_close(dev_t dev, int flag, int mode, lwp_t *l)
217 {
218 	return 0;
219 }
220 
221 static int
222 npf_stats_export(npf_t *npf, void *data)
223 {
224 	uint64_t *fullst, *uptr = *(uint64_t **)data;
225 	int error;
226 
227 	fullst = kmem_alloc(NPF_STATS_SIZE, KM_SLEEP);
228 	npfk_stats(npf, fullst); /* will zero the buffer */
229 	error = copyout(fullst, uptr, NPF_STATS_SIZE);
230 	kmem_free(fullst, NPF_STATS_SIZE);
231 	return error;
232 }
233 
234 /*
235  * npfctl_switch: enable or disable packet inspection.
236  */
237 static int
238 npfctl_switch(void *data)
239 {
240 	const bool onoff = *(int *)data ? true : false;
241 	int error;
242 
243 	if (onoff) {
244 		/* Enable: add pfil hooks. */
245 		error = npf_pfil_register(false);
246 	} else {
247 		/* Disable: remove pfil hooks. */
248 		npf_pfil_unregister(false);
249 		error = 0;
250 	}
251 	return error;
252 }
253 
/*
 * npf_dev_ioctl: /dev/npf control interface (super-user only).
 * Small fixed-size commands are handled inline; the configuration
 * commands marshal an nvlist request in from userland, run the
 * operation, and copy the nvlist response back out.
 */
254 static int
255 npf_dev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
256 {
257 	npf_t *npf = npf_getkernctx();
258 	nvlist_t *req, *resp;
259 	int error;
260 
261 	/* Available only for super-user. */
262 	if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
263 	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL)) {
264 		return EPERM;
265 	}
266 
267 	switch (cmd) {
268 	case IOC_NPF_VERSION:
269 		*(int *)data = NPF_VERSION;
270 		return 0;
271 	case IOC_NPF_SWITCH:
272 		return npfctl_switch(data);
273 	case IOC_NPF_TABLE:
274 		return npfctl_table(npf, data);
275 	case IOC_NPF_STATS:
276 		return npf_stats_export(npf, data);
277 	case IOC_NPF_LOAD:
278 	case IOC_NPF_SAVE:
279 	case IOC_NPF_RULE:
280 	case IOC_NPF_CONN_LOOKUP:
281 	case IOC_NPF_TABLE_REPLACE:
282 		/* nvlist_ref_t argument, handled below */
283 		break;
284 	default:
285 		return EINVAL;
286 	}
287 
	/* Bound the user-supplied nvlist at NPF_IOCTL_DATA_LIMIT (4 MB). */
288 	error = nvlist_copyin(data, &req, NPF_IOCTL_DATA_LIMIT);
289 	if (__predict_false(error)) {
290 #ifdef __NetBSD__
291 		/* Until the version bump. */
		/*
		 * NOTE(review): presumably compat for an older npfctl that
		 * issues IOC_NPF_SAVE with no request blob — an empty
		 * request nvlist is substituted.  Verify against npfctl.
		 */
292 		if (cmd != IOC_NPF_SAVE) {
293 			return error;
294 		}
295 		req = nvlist_create(0);
296 #else
297 		return error;
298 #endif
299 	}
300 	resp = nvlist_create(0);
301 	npfctl_run_op(npf, cmd, req, resp);
	/* Copy the response out through the caller's nvlist reference. */
302 	error = nvlist_copyout(data, resp);
303 	nvlist_destroy(resp);
304 	nvlist_destroy(req);
305 
306 	return error;
307 }
308 
309 static int
310 npf_dev_poll(dev_t dev, int events, lwp_t *l)
311 {
312 	return ENOTSUP;
313 }
314 
315 static int
316 npf_dev_read(dev_t dev, struct uio *uio, int flag)
317 {
318 	return ENOTSUP;
319 }
320 
321 bool
322 npf_autounload_p(void)
323 {
324 	npf_t *npf = npf_getkernctx();
325 	return !npf_active_p() && npf_default_pass(npf);
326 }
327 
328 /*
329  * Interface operations.
330  */
331 
332 static const char *
333 npf_ifop_getname(npf_t *npf __unused, ifnet_t *ifp)
334 {
335 	return ifp->if_xname;
336 }
337 
338 static ifnet_t *
339 npf_ifop_lookup(npf_t *npf __unused, const char *name)
340 {
341 	return ifunit(name);
342 }
343 
/*
 * npf_ifop_flush: set the NPF private pointer of every interface in
 * the system to the given value (used to reset NPF's interface
 * metadata in one sweep).
 */
344 static void
345 npf_ifop_flush(npf_t *npf __unused, void *arg)
346 {
347 	ifnet_t *ifp;
348 
	/* Lock order: kernel lock first, then the global ifnet lock. */
349 	KERNEL_LOCK(1, NULL);
350 	IFNET_GLOBAL_LOCK();
351 	IFNET_WRITER_FOREACH(ifp) {
352 		ifp->if_npf_private = arg;
353 	}
354 	IFNET_GLOBAL_UNLOCK();
355 	KERNEL_UNLOCK_ONE(NULL);
356 }
357 
358 static void *
359 npf_ifop_getmeta(npf_t *npf __unused, const ifnet_t *ifp)
360 {
361 	return ifp->if_npf_private;
362 }
363 
364 static void
365 npf_ifop_setmeta(npf_t *npf __unused, ifnet_t *ifp, void *arg)
366 {
367 	ifp->if_npf_private = arg;
368 }
369 
370 #ifdef _KERNEL
371 
372 /*
373  * Wrapper of the main packet handler to pass the kernel NPF context.
374  */
375 static int
376 npfos_packet_handler(void *arg, struct mbuf **mp, ifnet_t *ifp, int di)
377 {
378 	npf_t *npf = npf_getkernctx();
379 	return npfk_packet_handler(npf, mp, ifp, di);
380 }
381 
382 /*
383  * npf_ifhook: hook handling interface changes.
384  */
385 static void
386 npf_ifhook(void *arg, unsigned long cmd, void *arg2)
387 {
388 	npf_t *npf = npf_getkernctx();
389 	ifnet_t *ifp = arg2;
390 
391 	switch (cmd) {
392 	case PFIL_IFNET_ATTACH:
393 		npfk_ifmap_attach(npf, ifp);
394 		npf_ifaddr_sync(npf, ifp);
395 		break;
396 	case PFIL_IFNET_DETACH:
397 		npfk_ifmap_detach(npf, ifp);
398 		npf_ifaddr_flush(npf, ifp);
399 		break;
400 	}
401 }
402 
/*
 * npf_ifaddrhook: pfil(9) hook run on interface address ioctls.
 * Re-synchronises NPF's address tables for the affected interface on
 * any address set/add/delete; other commands are ignored.
 */
403 static void
404 npf_ifaddrhook(void *arg, u_long cmd, void *arg2)
405 {
406 	npf_t *npf = npf_getkernctx();
407 	struct ifaddr *ifa = arg2;
408 
409 	switch (cmd) {
410 	case SIOCSIFADDR:
411 	case SIOCAIFADDR:
412 	case SIOCDIFADDR:
413 #ifdef INET6
414 	case SIOCSIFADDR_IN6:
415 	case SIOCAIFADDR_IN6:
416 	case SIOCDIFADDR_IN6:
417 #endif
418 		KASSERT(ifa != NULL);
419 		break;
420 	default:
		/* Not an address-changing command: nothing to do. */
421 		return;
422 	}
423 	npf_ifaddr_sync(npf, ifa->ifa_ifp);
424 }
425 
426 /*
427  * npf_pfil_register: register pfil(9) hooks.
 *
 * With init=true (module load), only ensures the interface
 * attach/detach and address-change hooks are installed.  With
 * init=false (the "switch on" path), additionally hooks the IPv4 and
 * IPv6 packet paths and re-syncs the interface address tables.
 * Returns 0 on success, EEXIST if the packet hooks are already
 * registered, ENOENT if a required pfil head is missing.
428  */
429 static int
430 npf_pfil_register(bool init)
431 {
432 	npf_t *npf = npf_getkernctx();
433 	int error = 0;
434 
435 	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
436 
437 	/* Init: interface re-config and attach/detach hook. */
438 	if (!npf_ph_if) {
439 		npf_ph_if = pfil_head_get(PFIL_TYPE_IFNET, 0);
440 		if (!npf_ph_if) {
441 			error = ENOENT;
442 			goto out;
443 		}
444 
445 		error = pfil_add_ihook(npf_ifhook, NULL,
446 		    PFIL_IFNET, npf_ph_if);
447 		KASSERT(error == 0);
448 
449 		error = pfil_add_ihook(npf_ifaddrhook, NULL,
450 		    PFIL_IFADDR, npf_ph_if);
451 		KASSERT(error == 0);
452 	}
	/* Module load stops here; packet hooks are added on "switch on". */
453 	if (init) {
454 		goto out;
455 	}
456 
457 	/* Check if pfil hooks are not already registered. */
458 	if (pfil_registered) {
459 		error = EEXIST;
460 		goto out;
461 	}
462 
	/* One address family present is sufficient to proceed. */
463 	/* Capture points of the activity in the IP layer. */
464 	npf_ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
465 	npf_ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
466 	if (!npf_ph_inet && !npf_ph_inet6) {
467 		error = ENOENT;
468 		goto out;
469 	}
470 
471 	/* Packet IN/OUT handlers for IP layer. */
472 	if (npf_ph_inet) {
473 		error = pfil_add_hook(npfos_packet_handler, npf,
474 		    PFIL_ALL, npf_ph_inet);
475 		KASSERT(error == 0);
476 	}
477 	if (npf_ph_inet6) {
478 		error = pfil_add_hook(npfos_packet_handler, npf,
479 		    PFIL_ALL, npf_ph_inet6);
480 		KASSERT(error == 0);
481 	}
482 
483 	/*
484 	 * It is necessary to re-sync all/any interface address tables,
485 	 * since we did not listen for any changes.
486 	 */
487 	npf_ifaddr_syncall(npf);
488 	pfil_registered = true;
489 out:
490 	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
491 
492 	return error;
493 }
494 
495 /*
496  * npf_pfil_unregister: unregister pfil(9) hooks.
 *
 * Always removes the IPv4/IPv6 packet hooks ("switch off" path); the
 * interface attach/detach and address hooks are removed only on final
 * unload (fini=true).
497  */
498 static void
499 npf_pfil_unregister(bool fini)
500 {
501 	npf_t *npf = npf_getkernctx();
502 
503 	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
504 
505 	if (fini && npf_ph_if) {
506 		(void)pfil_remove_ihook(npf_ifhook, NULL,
507 		    PFIL_IFNET, npf_ph_if);
508 		(void)pfil_remove_ihook(npf_ifaddrhook, NULL,
509 		    PFIL_IFADDR, npf_ph_if);
510 	}
511 	if (npf_ph_inet) {
512 		(void)pfil_remove_hook(npfos_packet_handler, npf,
513 		    PFIL_ALL, npf_ph_inet);
514 	}
515 	if (npf_ph_inet6) {
516 		(void)pfil_remove_hook(npfos_packet_handler, npf,
517 		    PFIL_ALL, npf_ph_inet6);
518 	}
519 	pfil_registered = false;
520 
521 	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
522 }
523 
524 bool
525 npf_active_p(void)
526 {
527 	return pfil_registered;
528 }
529 
530 #endif
531 
532 #ifdef __NetBSD__
533 
534 /*
535  * Epoch-Based Reclamation (EBR) wrappers: in NetBSD, we rely on the
536  * passive serialization mechanism (see pserialize(9) manual page),
537  * which provides sufficient guarantees for NPF.
538  */
539 
/* npf_ebr_create: the EBR handle is a pserialize(9) instance here. */
540 ebr_t *
541 npf_ebr_create(void)
542 {
543 	return pserialize_create();
544 }
545 
/* npf_ebr_destroy: release the underlying pserialize(9) instance. */
546 void
547 npf_ebr_destroy(ebr_t *ebr)
548 {
549 	pserialize_destroy(ebr);
550 }
551 
/* npf_ebr_register: no-op under pserialize(9); argument is asserted only. */
552 void
553 npf_ebr_register(ebr_t *ebr)
554 {
555 	KASSERT(ebr != NULL); (void)ebr;
556 }
557 
/* npf_ebr_unregister: no-op under pserialize(9); argument is asserted only. */
558 void
559 npf_ebr_unregister(ebr_t *ebr)
560 {
561 	KASSERT(ebr != NULL); (void)ebr;
562 }
563 
564 int
565 npf_ebr_enter(ebr_t *ebr)
566 {
567 	KASSERT(ebr != NULL); (void)ebr;
568 	return pserialize_read_enter();
569 }
570 
/* npf_ebr_exit: leave the read section entered by npf_ebr_enter(). */
571 void
572 npf_ebr_exit(ebr_t *ebr, int s)
573 {
574 	KASSERT(ebr != NULL); (void)ebr;
575 	pserialize_read_exit(s);
576 }
577 
/* npf_ebr_full_sync: writer-side synchronisation via pserialize_perform(). */
578 void
579 npf_ebr_full_sync(ebr_t *ebr)
580 {
581 	pserialize_perform(ebr);
582 }
583 
584 bool
585 npf_ebr_incrit_p(ebr_t *ebr)
586 {
587 	KASSERT(ebr != NULL); (void)ebr;
588 	return pserialize_in_read_section();
589 }
590 
591 #endif
592