/*	$NetBSD: npf_ruleset.c,v 1.1 2010/08/22 18:56:22 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 *
 * Lock order:
 *
 *	ruleset_lock -> table_lock -> npf_table_t::t_lock
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.1 2010/08/22 18:56:22 rmind Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#endif

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/types.h>

#include <net/if.h>
#include <net/pfil.h>

#include "npf_ncode.h"
#include "npf_impl.h"

struct npf_hook {
	void				(*hk_fn)(const npf_cache_t *, void *);
	void *				hk_arg;
	LIST_ENTRY(npf_hook)		hk_entry;
};

struct npf_ruleset {
	TAILQ_HEAD(, npf_rule)		rs_queue;
	npf_rule_t *			rs_default;
	int				_reserved;
};

/* Rule structure. */
struct npf_rule {
	/* List entry in the ruleset. */
	TAILQ_ENTRY(npf_rule)		r_entry;
	/* Optional: sub-ruleset, NAT policy. */
	npf_ruleset_t			r_subset;
	npf_natpolicy_t *		r_nat;
	/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
	u_int				r_priority;
	/* N-code to process. */
	void *				r_ncode;
	size_t				r_nc_size;
	/* Attributes of this rule. */
	int				r_attr;
	/* Interface. */
	u_int				r_ifid;
	/* Hit counter. */
	u_long				r_hitcount;
	/* List of hooks to process on match. */
	LIST_HEAD(, npf_hook)		r_hooks;
};

/* Global ruleset, its lock and the rule cache. */
static npf_ruleset_t *			ruleset;
static krwlock_t			ruleset_lock;
static pool_cache_t			rule_cache;

/*
 * npf_ruleset_sysinit: initialise ruleset structures.
 */
int
npf_ruleset_sysinit(void)
{

	rule_cache = pool_cache_init(sizeof(npf_rule_t), coherency_unit,
	    0, 0, "npfrlpl", NULL, IPL_NONE, NULL, NULL, NULL);
	if (rule_cache == NULL) {
		return ENOMEM;
	}
	rw_init(&ruleset_lock);
	ruleset = npf_ruleset_create();
	return 0;
}

void
npf_ruleset_sysfini(void)
{

	npf_ruleset_destroy(ruleset);
	rw_destroy(&ruleset_lock);
	pool_cache_destroy(rule_cache);
}

npf_ruleset_t *
npf_ruleset_create(void)
{
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(sizeof(npf_ruleset_t), KM_SLEEP);
	TAILQ_INIT(&rlset->rs_queue);
	return rlset;
}

void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = TAILQ_FIRST(&rlset->rs_queue)) != NULL) {
		TAILQ_REMOVE(&rlset->rs_queue, rl, r_entry);
		npf_rule_free(rl);
	}
	kmem_free(rlset, sizeof(npf_ruleset_t));
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 *
 * Note: multiple rules at the same priority are allowed.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	npf_rule_t *it;

	if (rl->r_attr & NPF_RULE_DEFAULT) {
		rlset->rs_default = rl;
		return;
	}
	TAILQ_FOREACH(it, &rlset->rs_queue, r_entry) {
		/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
		if (it->r_priority > rl->r_priority)
			break;
	}
	if (it == NULL) {
		TAILQ_INSERT_TAIL(&rlset->rs_queue, rl, r_entry);
	} else {
		TAILQ_INSERT_BEFORE(it, rl, r_entry);
	}
}
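
/*
 * Illustrative sketch, not part of the original source: building a small
 * ruleset with npf_rule_alloc() and npf_ruleset_insert().  Rules with a
 * lower priority value are queued (and therefore matched) first, while a
 * rule flagged NPF_RULE_DEFAULT becomes the rs_default fall-back instead
 * of being queued.  The n-code arguments are omitted (NULL, 0) purely for
 * brevity and the function name is made up for the example.
 */
#if 0
static npf_ruleset_t *
npf_example_build_ruleset(void)
{
	npf_ruleset_t *rlset;
	npf_rule_t *rl;

	rlset = npf_ruleset_create();

	/* Priority 0: pass and stop inspection ("final") on match. */
	rl = npf_rule_alloc(NPF_RULE_PASS | NPF_RULE_FINAL, 0, 0, NULL, 0);
	npf_ruleset_insert(rlset, rl);

	/* Priority 1: queued after the priority 0 rule. */
	rl = npf_rule_alloc(NPF_RULE_PASS, 1, 0, NULL, 0);
	npf_ruleset_insert(rlset, rl);

	/* Default (block) rule: stored as rs_default, not queued. */
	rl = npf_rule_alloc(NPF_RULE_DEFAULT, 0, 0, NULL, 0);
	npf_ruleset_insert(rlset, rl);

	return rlset;
}
#endif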

/*
 * npf_ruleset_reload: atomically load the new ruleset and tableset,
 * and destroy the old structures.
 */
void
npf_ruleset_reload(npf_ruleset_t *nrlset, npf_tableset_t *ntblset)
{
	npf_ruleset_t *oldrlset;
	npf_tableset_t *oldtblset;

	/*
	 * Swap the old ruleset with the new one.
	 * XXX: Rework to be fully lock-less; later.
	 */
	rw_enter(&ruleset_lock, RW_WRITER);
	oldrlset = atomic_swap_ptr(&ruleset, nrlset);

	/*
	 * Set up the new tableset.  It takes the global tableset lock and
	 * therefore ensures atomicity.  The old tableset is freed afterwards.
	 */
	oldtblset = npf_tableset_reload(ntblset);
	KASSERT(oldtblset != NULL);
	/* Unlock.  Everything goes "live" now. */
	rw_exit(&ruleset_lock);

	npf_tableset_destroy(oldtblset);
	npf_ruleset_destroy(oldrlset);
}
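
/*
 * Illustrative sketch, not part of the original source: the control path is
 * expected to build a complete new ruleset (and tableset) and then publish
 * both with a single npf_ruleset_reload() call; the old structures are only
 * destroyed after the swap, so the packet path always sees a consistent
 * configuration.  The tableset is taken as an argument here to avoid
 * assuming the exact tableset construction API; the function name is made
 * up for the example.
 */
#if 0
static void
npf_example_reload(npf_tableset_t *newtblset)
{
	npf_ruleset_t *newrlset;
	npf_rule_t *rl;

	/* Build the replacement ruleset: a single "block all" default. */
	newrlset = npf_ruleset_create();
	rl = npf_rule_alloc(NPF_RULE_DEFAULT, 0, 0, NULL, 0);
	npf_ruleset_insert(newrlset, rl);

	/* Atomically swap in the new ruleset and tableset. */
	npf_ruleset_reload(newrlset, newtblset);
}
#endif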

/*
 * npf_rule_alloc: allocate a rule, validate the n-code and associate it
 * with the rule.
 */
npf_rule_t *
npf_rule_alloc(int attr, pri_t pri, int ifidx, void *nc, size_t sz)
{
	npf_rule_t *rl;
	int errat;

	/* Perform validation & building of n-code. */
	if (nc && npf_ncode_validate(nc, sz, &errat)) {
		return NULL;
	}
	/* Allocate a rule structure. */
	rl = pool_cache_get(rule_cache, PR_WAITOK);
	if (rl == NULL) {
		return NULL;
	}
	TAILQ_INIT(&rl->r_subset.rs_queue);
	LIST_INIT(&rl->r_hooks);
	rl->r_priority = pri;
	rl->r_attr = attr;
	rl->r_ifid = ifidx;
	rl->r_ncode = nc;
	rl->r_nc_size = sz;
	rl->r_hitcount = 0;
	rl->r_nat = NULL;
	return rl;
}
#if 0
/*
 * npf_activate_rule: activate rule by inserting it into the global ruleset.
 */
void
npf_activate_rule(npf_rule_t *rl)
{

	rw_enter(&ruleset_lock, RW_WRITER);
	npf_ruleset_insert(ruleset, rl);
	rw_exit(&ruleset_lock);
}

/*
 * npf_deactivate_rule: deactivate rule by removing it from the ruleset.
 */
void
npf_deactivate_rule(npf_rule_t *rl)
{

	rw_enter(&ruleset_lock, RW_WRITER);
	TAILQ_REMOVE(&ruleset->rs_queue, rl, r_entry);
	rw_exit(&ruleset_lock);
}
#endif
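
/*
 * Illustrative sketch, not part of the original source: allocating a rule
 * around n-code that has already been copied into kernel memory.  If the
 * n-code fails validation, npf_rule_alloc() returns NULL and the caller
 * still owns the n-code buffer; on success the rule takes ownership and a
 * later npf_rule_free() releases it via npf_ncode_free().  The function
 * name is made up for the example.
 */
#if 0
static npf_rule_t *
npf_example_rule_with_ncode(void *nc, size_t nc_size)
{
	npf_rule_t *rl;

	rl = npf_rule_alloc(NPF_RULE_PASS | NPF_RULE_FINAL, 0, 0, nc, nc_size);
	if (rl == NULL) {
		/* Validation failed: the n-code was not consumed. */
		npf_ncode_free(nc, nc_size);
		return NULL;
	}
	return rl;
}
#endif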

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{

	if (rl->r_ncode) {
		/* Free n-code (if any). */
		npf_ncode_free(rl->r_ncode, rl->r_nc_size);
	}
	if (rl->r_nat) {
		/* Free NAT policy (if associated). */
		npf_nat_freepolicy(rl->r_nat);
	}
	pool_cache_put(rule_cache, rl);
}

/*
 * npf_rule_subset: return sub-ruleset, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 * npf_rule_setnat: assign NAT policy to the rule.
 */

npf_ruleset_t *
npf_rule_subset(npf_rule_t *rl)
{
	return &rl->r_subset;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_nat;
}

void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{
	rl->r_nat = np;
}
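
/*
 * Illustrative sketch, not part of the original source: populating a rule's
 * sub-ruleset via npf_rule_subset().  Once the parent rule matches,
 * npf_ruleset_match() descends into the (non-empty) sub-ruleset, so a group
 * of related rules can share the parent's matching criteria.  The function
 * name is made up for the example.
 */
#if 0
static void
npf_example_add_subrule(npf_rule_t *parent)
{
	npf_ruleset_t *subset = npf_rule_subset(parent);
	npf_rule_t *rl;

	rl = npf_rule_alloc(NPF_RULE_PASS | NPF_RULE_FINAL, 0, 0, NULL, 0);
	npf_ruleset_insert(subset, rl);
}
#endif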

/*
 * npf_hook_register: register action hook in the rule.
 */
npf_hook_t *
npf_hook_register(npf_rule_t *rl,
    void (*fn)(const npf_cache_t *, void *), void *arg)
{
	npf_hook_t *hk;

	hk = kmem_alloc(sizeof(npf_hook_t), KM_SLEEP);
	if (hk != NULL) {
		hk->hk_fn = fn;
		hk->hk_arg = arg;
		rw_enter(&ruleset_lock, RW_WRITER);
		LIST_INSERT_HEAD(&rl->r_hooks, hk, hk_entry);
		rw_exit(&ruleset_lock);
	}
	return hk;
}

/*
 * npf_hook_unregister: unregister a specified hook.
 *
 * => Hook should have been registered in the rule.
 */
void
npf_hook_unregister(npf_rule_t *rl, npf_hook_t *hk)
{

	rw_enter(&ruleset_lock, RW_WRITER);
	LIST_REMOVE(hk, hk_entry);
	rw_exit(&ruleset_lock);
	kmem_free(hk, sizeof(npf_hook_t));
}
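
/*
 * Illustrative sketch, not part of the original source: attaching a hook to
 * a rule so it runs for every packet the rule passes, and detaching it
 * again.  The counting callback and both function names are made up for
 * the example; a real hook receives the npf_cache_t of the matched packet.
 */
#if 0
static void
npf_example_count_hook(const npf_cache_t *npc, void *arg)
{
	u_long *counter = arg;

	atomic_inc_ulong(counter);
}

static void
npf_example_use_hooks(npf_rule_t *rl)
{
	static u_long hits;
	npf_hook_t *hk;

	hk = npf_hook_register(rl, npf_example_count_hook, &hits);
	if (hk == NULL)
		return;
	/* ... packets passed by the rule now bump the counter ... */
	npf_hook_unregister(rl, hk);
}
#endif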

/*
 * npf_ruleset_match: inspect the packet against the ruleset.
 *
 * Loop over the rules in the set and run the n-code processor of each
 * rule against the packet (nbuf chain).  If a sub-ruleset is found,
 * inspect it as well.
 *
 * => If found, the ruleset is kept read-locked.
 * => Caller should protect the nbuf chain.
 */
npf_rule_t *
npf_ruleset_match(npf_ruleset_t *rlset0, npf_cache_t *npc, nbuf_t *nbuf,
    struct ifnet *ifp, const int di, const int layer)
{
	npf_rule_t *final_rl = NULL, *rl;
	npf_ruleset_t *rlset = rlset0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
reinspect:
	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);

		/* Match the interface. */
		if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
			continue;
		}
		/* Match the direction. */
		if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
			const int di_mask =
			    (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;

			if ((rl->r_attr & di_mask) == 0)
				continue;
		}
		/* Process the n-code, if any. */
		const void *nc = rl->r_ncode;
		if (nc && npf_ncode_process(npc, nc, nbuf, layer)) {
			continue;
		}
		/* Set the matching rule and check for "final". */
		final_rl = rl;
		if (rl->r_attr & NPF_RULE_FINAL) {
			goto final;
		}
	}
	/* Default, if no final rule. */
	if (final_rl == NULL) {
		rlset = rlset0;
		final_rl = rlset->rs_default;
	}
	/* Inspect the sub-ruleset, if any. */
	if (final_rl) {
final:
		if (TAILQ_EMPTY(&final_rl->r_subset.rs_queue)) {
			return final_rl;
		}
		rlset = &final_rl->r_subset;
		final_rl = NULL;
		goto reinspect;
	}
	return final_rl;
}

/*
 * npf_ruleset_inspect: inspection of the main ruleset for filtering.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    struct ifnet *ifp, const int di, const int layer)
{
	npf_rule_t *rl;

	rw_enter(&ruleset_lock, RW_READER);
	rl = npf_ruleset_match(ruleset, npc, nbuf, ifp, di, layer);
	if (rl == NULL) {
		rw_exit(&ruleset_lock);
	}
	return rl;
}

/*
 * npf_rule_apply: apply the rule, i.e. run its hooks and return the
 * appropriate value.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 * => Releases the ruleset lock.
 */
int
npf_rule_apply(const npf_cache_t *npc, npf_rule_t *rl, bool *keepstate)
{
	npf_hook_t *hk;

	KASSERT(rw_lock_held(&ruleset_lock));

	/* Update the "hit" counter. */
	if (rl->r_attr & NPF_RULE_COUNT) {
		atomic_inc_ulong(&rl->r_hitcount);
	}

	/* If not passing - drop the packet. */
	if ((rl->r_attr & NPF_RULE_PASS) == 0) {
		rw_exit(&ruleset_lock);
		return ENETUNREACH;
	}

	/* Passing.  Run the hooks. */
	LIST_FOREACH(hk, &rl->r_hooks, hk_entry) {
		KASSERT(hk->hk_fn != NULL);
		(*hk->hk_fn)(npc, hk->hk_arg);
	}
	*keepstate = (rl->r_attr & NPF_RULE_KEEPSTATE) != 0;
	rw_exit(&ruleset_lock);

	return 0;
}
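
/*
 * Illustrative sketch, not part of the original source: the expected
 * packet-path pairing of npf_ruleset_inspect() and npf_rule_apply().  On a
 * match the ruleset is returned read-locked and npf_rule_apply() runs the
 * hooks and drops that lock; with no match the lock has already been
 * released.  The layer argument is simply forwarded to avoid assuming the
 * NPF layer constants; the function name is made up for the example.
 */
#if 0
static int
npf_example_filter(npf_cache_t *npc, nbuf_t *nbuf, struct ifnet *ifp,
    const int di, const int layer, bool *keepstate)
{
	npf_rule_t *rl;

	rl = npf_ruleset_inspect(npc, nbuf, ifp, di, layer);
	if (rl == NULL) {
		/* No matching rule (and no default): pass the packet. */
		return 0;
	}
	/* Runs the hooks for a passing rule and releases the ruleset lock. */
	return npf_rule_apply(npc, rl, keepstate);
}
#endif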

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_rulenc_dump(npf_rule_t *rl)
{
	uint32_t *op = rl->r_ncode;
	size_t n = rl->r_nc_size;

	/* Dump the n-code words, if the rule has any n-code at all. */
	while (n) {
		printf("\t> |0x%02x|\n", (uint32_t)*op);
		op++;
		n -= sizeof(*op);
	}

	printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
}

#endif