xref: /netbsd-src/sys/net/npf/npf_ruleset.c (revision 4817a0b0b8fe9612e8ebe21a9bf2d97b95038a97)
/*	$NetBSD: npf_ruleset.c,v 1.5 2010/12/27 14:58:55 uebayasi Exp $	*/

/*-
 * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.5 2010/12/27 14:58:55 uebayasi Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/types.h>

#include <net/pfil.h>
#include <net/if.h>
#endif

#include "npf_ncode.h"
#include "npf_impl.h"
/* Ruleset structure (queue and default rule). */
struct npf_ruleset {
	TAILQ_HEAD(, npf_rule)	rs_queue;
	npf_rule_t *		rs_default;
};

/* Rule hook entry. */
struct npf_hook {
	void			(*hk_fn)(npf_cache_t *, nbuf_t *, void *);
	void *			hk_arg;
	LIST_ENTRY(npf_hook)	hk_entry;
};

/* Rule processing structure. */
struct npf_rproc {
	/* Reference count. */
	u_int			rp_refcnt;
	/* Normalization options. */
	bool			rp_rnd_ipid;
	bool			rp_no_df;
	u_int			rp_minttl;
	u_int			rp_maxmss;
	/* Logging interface. */
	u_int			rp_log_ifid;
};

/* Rule structure. */
struct npf_rule {
	TAILQ_ENTRY(npf_rule)	r_entry;
	/* Optional: sub-ruleset, NAT policy. */
	npf_ruleset_t		r_subset;
	npf_natpolicy_t *	r_natp;
	/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
	u_int			r_priority;
	/* N-code to process. */
	void *			r_ncode;
	size_t			r_nc_size;
	/* Attributes of this rule. */
	uint32_t		r_attr;
	/* Interface. */
	u_int			r_ifid;
	/* Hit counter. */
	u_long			r_hitcount;
	/* Rule processing data. */
	npf_rproc_t *		r_rproc;
	/* List of hooks to process on match. */
	kmutex_t		r_hooks_lock;
	LIST_HEAD(, npf_hook)	r_hooks;
};

npf_ruleset_t *
npf_ruleset_create(void)
{
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(sizeof(npf_ruleset_t), KM_SLEEP);
	TAILQ_INIT(&rlset->rs_queue);
	return rlset;
}

void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = TAILQ_FIRST(&rlset->rs_queue)) != NULL) {
		TAILQ_REMOVE(&rlset->rs_queue, rl, r_entry);
		npf_rule_free(rl);
	}
	kmem_free(rlset, sizeof(npf_ruleset_t));
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 *
 * Note: multiple rules at the same priority are allowed.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	npf_rule_t *it;

	if (rl->r_attr & NPF_RULE_DEFAULT) {
		rlset->rs_default = rl;
		return;
	}
	TAILQ_FOREACH(it, &rlset->rs_queue, r_entry) {
		/* Rule priority: (highest) 0, 1, 2 ... n (lowest). */
		if (it->r_priority > rl->r_priority)
			break;
	}
	if (it == NULL) {
		TAILQ_INSERT_TAIL(&rlset->rs_queue, rl, r_entry);
	} else {
		TAILQ_INSERT_BEFORE(it, rl, r_entry);
	}
}
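
/*
 * Illustrative sketch (hypothetical caller): building a small ruleset
 * with the functions above.  The rules "rl_pass" and "rl_blk" are
 * assumed to have been created via npf_rule_alloc() with priorities
 * 0 and 1 respectively:
 *
 *	npf_ruleset_t *rlset = npf_ruleset_create();
 *
 *	npf_ruleset_insert(rlset, rl_pass);	-- priority 0, evaluated first
 *	npf_ruleset_insert(rlset, rl_blk);	-- priority 1, evaluated after
 *	...
 *	npf_ruleset_destroy(rlset);		-- also frees the inserted rules
 *
 * Rules sharing a priority keep their insertion order, since a new rule
 * is inserted before the first rule with a strictly greater priority.
 */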

/*
 * npf_ruleset_matchnat: find a matching NAT policy in the ruleset.
 */
npf_rule_t *
npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		if (npf_nat_matchpolicy(rl->r_natp, mnp))
			break;
	}
	return rl;
}

/*
 * npf_ruleset_natreload: minimal reload of NAT policies by matching
 * two (active and new) NAT rulesets.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
{
	npf_natpolicy_t *np, *anp;
	npf_rule_t *rl, *arl;

	KASSERT(npf_core_locked());

	/* Scan the new NAT ruleset against the NAT policies in the old one. */
	TAILQ_FOREACH(rl, &nrlset->rs_queue, r_entry) {
		np = rl->r_natp;
		arl = npf_ruleset_matchnat(arlset, np);
		if (arl == NULL) {
			continue;
		}
		/*
		 * On match - exchange the NAT policies, so that the active
		 * policy (and its translation state) stays with the rule in
		 * the new ruleset, while the fresh one goes away with the old.
		 */
		anp = arl->r_natp;
		rl->r_natp = anp;
		arl->r_natp = np;
	}
}

npf_rproc_t *
npf_rproc_create(prop_dictionary_t rpdict)
{
	npf_rproc_t *rp;
	prop_object_t obj;

	rp = kmem_alloc(sizeof(npf_rproc_t), KM_SLEEP);
	rp->rp_refcnt = 1;

	/* Logging interface ID (integer). */
	obj = prop_dictionary_get(rpdict, "log-interface");
	rp->rp_log_ifid = prop_number_integer_value(obj);

	/* Randomize IP ID (bool). */
	obj = prop_dictionary_get(rpdict, "randomize-id");
	rp->rp_rnd_ipid = prop_bool_true(obj);

	/* IP_DF flag cleansing (bool). */
	obj = prop_dictionary_get(rpdict, "no-df");
	rp->rp_no_df = prop_bool_true(obj);

	/* Minimum IP TTL (integer). */
	obj = prop_dictionary_get(rpdict, "min-ttl");
	rp->rp_minttl = prop_number_integer_value(obj);

	/* Maximum TCP MSS (integer). */
	obj = prop_dictionary_get(rpdict, "max-mss");
	rp->rp_maxmss = prop_number_integer_value(obj);

	return rp;
}

npf_rproc_t *
npf_rproc_return(npf_rule_t *rl)
{
	npf_rproc_t *rp = rl->r_rproc;

	if (rp) {
		atomic_inc_uint(&rp->rp_refcnt);
	}
	return rp;
}

void
npf_rproc_release(npf_rproc_t *rp)
{

	/* Destroy on last reference. */
	if (atomic_dec_uint_nv(&rp->rp_refcnt) != 0) {
		return;
	}
	kmem_free(rp, sizeof(npf_rproc_t));
}
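
/*
 * Reference counting sketch (hypothetical caller): npf_rproc_create()
 * returns the structure with one reference, owned by the rule, and each
 * npf_rproc_return() must be paired with an npf_rproc_release():
 *
 *	npf_rproc_t *rp = npf_rproc_return(rl);  -- takes an extra reference
 *	if (rp != NULL) {
 *		npf_rproc_run(npc, nbuf, rp);
 *		npf_rproc_release(rp);	-- drops it; frees on the last one
 *	}
 */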

void
npf_rproc_run(npf_cache_t *npc, nbuf_t *nbuf, npf_rproc_t *rp)
{

	KASSERT(rp->rp_refcnt > 0);

	/* Normalize the packet, if required. */
	(void)npf_normalize(npc, nbuf,
	    rp->rp_rnd_ipid, rp->rp_no_df, rp->rp_minttl, rp->rp_maxmss);

	/* Log packet, if required. */
	if (rp->rp_log_ifid) {
		npf_log_packet(npc, nbuf, rp->rp_log_ifid);
	}
}

/*
 * npf_rule_alloc: allocate a rule and attach the n-code, which the
 * caller has copied in from user-space.
 *
 * => N-code should be validated by the caller.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict, void *nc, size_t nc_size)
{
	npf_rule_t *rl;
	prop_object_t obj;
#ifdef DIAGNOSTIC
	int errat;
#endif

	/* Allocate a rule structure. */
	rl = kmem_alloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset.rs_queue);
	mutex_init(&rl->r_hooks_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	LIST_INIT(&rl->r_hooks);
	rl->r_hitcount = 0;
	rl->r_natp = NULL;

	/* N-code. */
	KASSERT(nc == NULL || npf_ncode_validate(nc, nc_size, &errat) == 0);
	rl->r_ncode = nc;
	rl->r_nc_size = nc_size;

	/* Attributes (integer). */
	obj = prop_dictionary_get(rldict, "attributes");
	rl->r_attr = prop_number_integer_value(obj);

	/* Priority (integer). */
	obj = prop_dictionary_get(rldict, "priority");
	rl->r_priority = prop_number_integer_value(obj);

	/* Interface ID (integer). */
	obj = prop_dictionary_get(rldict, "interface");
	rl->r_ifid = prop_number_integer_value(obj);

	/* Create rule processing structure, if any. */
	if (rl->r_attr & (NPF_RULE_LOG | NPF_RULE_NORMALIZE)) {
		rl->r_rproc = npf_rproc_create(rldict);
	} else {
		rl->r_rproc = NULL;
	}
	return rl;
}
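
/*
 * Sketch of the dictionary a hypothetical caller could pass to
 * npf_rule_alloc().  The keys are the ones read above (and, when
 * NPF_RULE_LOG or NPF_RULE_NORMALIZE is set, the ones read by
 * npf_rproc_create()); error handling and the n-code are omitted:
 *
 *	prop_dictionary_t rldict = prop_dictionary_create();
 *	prop_object_t obj;
 *
 *	obj = prop_number_create_integer(NPF_RULE_PASS | NPF_RULE_IN |
 *	    NPF_RULE_OUT);
 *	prop_dictionary_set(rldict, "attributes", obj);
 *	prop_object_release(obj);
 *
 *	obj = prop_number_create_integer(1);
 *	prop_dictionary_set(rldict, "priority", obj);
 *	prop_object_release(obj);
 *
 *	obj = prop_number_create_integer(0);
 *	prop_dictionary_set(rldict, "interface", obj);	-- 0 matches any
 *	prop_object_release(obj);
 *
 *	rl = npf_rule_alloc(rldict, NULL, 0);	-- no n-code: matches all
 */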

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np) {
		/* Free NAT policy. */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release/free rule processing structure. */
		npf_rproc_release(rp);
	}
	if (rl->r_ncode) {
		/* Free n-code. */
		npf_ncode_free(rl->r_ncode, rl->r_nc_size);
	}
	mutex_destroy(&rl->r_hooks_lock);
	kmem_free(rl, sizeof(npf_rule_t));
}

/*
 * npf_rule_subset: return sub-ruleset, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

npf_ruleset_t *
npf_rule_subset(npf_rule_t *rl)
{
	return &rl->r_subset;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}

/*
 * npf_rule_setnat: assign a NAT policy to the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{

	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}

/*
 * npf_hook_register: register action hook in the rule.
 */
npf_hook_t *
npf_hook_register(npf_rule_t *rl,
    void (*fn)(npf_cache_t *, nbuf_t *, void *), void *arg)
{
	npf_hook_t *hk;

	hk = kmem_alloc(sizeof(npf_hook_t), KM_SLEEP);
	if (hk != NULL) {
		hk->hk_fn = fn;
		hk->hk_arg = arg;
		mutex_enter(&rl->r_hooks_lock);
		LIST_INSERT_HEAD(&rl->r_hooks, hk, hk_entry);
		mutex_exit(&rl->r_hooks_lock);
	}
	return hk;
}
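
/*
 * Hook usage sketch (the callback and its argument are hypothetical).
 * Registered hooks are run by npf_rule_apply() below, and only when the
 * matched rule passes the packet:
 *
 *	static void
 *	example_hook(npf_cache_t *npc, nbuf_t *nbuf, void *arg)
 *	{
 *		-- inspect or mark the passed packet here
 *	}
 *
 *	npf_hook_t *hk = npf_hook_register(rl, example_hook, NULL);
 *	...
 *	npf_hook_unregister(rl, hk);
 */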

/*
 * npf_hook_unregister: unregister a specified hook.
 *
 * => Hook should have been registered in the rule.
 */
void
npf_hook_unregister(npf_rule_t *rl, npf_hook_t *hk)
{

	mutex_enter(&rl->r_hooks_lock);
	LIST_REMOVE(hk, hk_entry);
	mutex_exit(&rl->r_hooks_lock);
	kmem_free(hk, sizeof(npf_hook_t));
}
/*
 * npf_ruleset_match: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the n-code processor of
 * each rule against the packet (nbuf chain).  Return the last matching
 * rule or, as soon as a matching rule has NPF_RULE_FINAL set, that rule.
 */
npf_rule_t *
npf_ruleset_match(npf_ruleset_t *rlset, npf_cache_t *npc, nbuf_t *nbuf,
    struct ifnet *ifp, const int di, const int layer)
{
	npf_rule_t *final_rl = NULL, *rl;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);

		/* Match the interface. */
		if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
			continue;
		}
		/* Match the direction (both IN and OUT set matches either). */
		if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
			const int di_mask =
			    (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;

			if ((rl->r_attr & di_mask) == 0)
				continue;
		}
		/* Process the n-code, if any. */
		const void *nc = rl->r_ncode;
		if (nc && npf_ncode_process(npc, nc, nbuf, layer)) {
			continue;
		}
		/* Set the matching rule and check for "final". */
		final_rl = rl;
		if (rl->r_attr & NPF_RULE_FINAL) {
			break;
		}
	}
	return final_rl;
}
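
/*
 * Worked example of the above: if a "block" rule at priority 0 and a
 * "pass" rule at priority 1 both match the same packet, the "pass" rule
 * is returned, since it matches last; setting NPF_RULE_FINAL on the
 * "block" rule would stop the scan there and return it instead.
 */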

/*
 * npf_ruleset_inspect: inspect the main ruleset for filtering; if the
 * matching rule has a sub-ruleset, inspect that as well.
 *
 * => If a rule is found, the ruleset is kept read-locked.
 * => Caller should protect the nbuf chain.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    struct ifnet *ifp, const int di, const int layer)
{
	npf_ruleset_t *rlset;
	npf_rule_t *rl;
	bool defed;

	defed = false;
	npf_core_enter();
	rlset = npf_core_ruleset();
reinspect:
	rl = npf_ruleset_match(rlset, npc, nbuf, ifp, di, layer);

	/* If no rule matched, fall back to the default rule (only once). */
	if (rl == NULL && !defed) {
		npf_ruleset_t *mainrlset = npf_core_ruleset();
		rl = mainrlset->rs_default;
		defed = true;
	}
	/* Inspect the sub-ruleset, if any. */
	if (rl && !TAILQ_EMPTY(&rl->r_subset.rs_queue)) {
		rlset = &rl->r_subset;
		goto reinspect;
	}
	if (rl == NULL) {
		npf_core_exit();
	}
	return rl;
}

/*
 * npf_rule_apply: apply the rule, i.e. run the hooks and return the
 * appropriate value.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 * => Releases the ruleset lock.
 */
int
npf_rule_apply(npf_cache_t *npc, nbuf_t *nbuf, npf_rule_t *rl, int *retfl)
{
	npf_hook_t *hk;
	int error;

	KASSERT(npf_core_locked());

	/* Update the "hit" counter. */
	if (rl->r_attr & NPF_RULE_COUNT) {
		atomic_inc_ulong(&rl->r_hitcount);
	}

	/* If not passing - drop the packet. */
	if ((rl->r_attr & NPF_RULE_PASS) == 0) {
		error = ENETUNREACH;
		goto done;
	}
	error = 0;

	/* Passing.  Run the hooks. */
	LIST_FOREACH(hk, &rl->r_hooks, hk_entry) {
		KASSERT(hk->hk_fn != NULL);
		(*hk->hk_fn)(npc, nbuf, hk->hk_arg);
	}
done:
	*retfl = rl->r_attr;
	npf_core_exit();
	return error;
}
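
/*
 * Sketch of the expected calling sequence (hypothetical, simplified
 * caller).  The variables npc, nbuf, ifp, di, layer, error and retfl
 * stand for the usual inspection context:
 *
 *	npf_rule_t *rl = npf_ruleset_inspect(npc, nbuf, ifp, di, layer);
 *	if (rl == NULL) {
 *		-- nothing matched and there is no default rule; the
 *		-- ruleset lock has already been dropped
 *		return 0;
 *	}
 *	error = npf_rule_apply(npc, nbuf, rl, &retfl);
 *	-- the ruleset lock is released by npf_rule_apply(); retfl now
 *	-- carries the attributes of the matched rule
 */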

#if defined(DDB) || defined(_NPF_TESTING)

void
npf_rulenc_dump(npf_rule_t *rl)
{
	uint32_t *op = rl->r_ncode;
	size_t n = rl->r_nc_size;

	while (n) {
		printf("\t> |0x%02x|\n", (uint32_t)*op);
		op++;
		n -= sizeof(*op);
	}
	printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
}

#endif