xref: /netbsd-src/sys/net/npf/npf_ruleset.c (revision 479d8f7d843cc1b22d497efdf1f27a50ee8418d4)
1 /*	$NetBSD: npf_ruleset.c,v 1.44 2016/12/28 21:55:04 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This material is based upon work partially supported by The
8  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * NPF ruleset module.
34  */
35 
36 #ifdef _KERNEL
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.44 2016/12/28 21:55:04 christos Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/types.h>
42 
43 #include <sys/atomic.h>
44 #include <sys/kmem.h>
45 #include <sys/queue.h>
46 #include <sys/mbuf.h>
47 #include <sys/types.h>
48 
49 #include <net/bpf.h>
50 #include <net/bpfjit.h>
51 #include <net/pfil.h>
52 #include <net/if.h>
53 #endif
54 
55 #include "npf_impl.h"
56 
57 struct npf_ruleset {
58 	/*
59 	 * - List of all rules.
60 	 * - Dynamic (i.e. named) rules.
61 	 * - G/C list for convenience.
62 	 */
63 	LIST_HEAD(, npf_rule)	rs_all;
64 	LIST_HEAD(, npf_rule)	rs_dynamic;
65 	LIST_HEAD(, npf_rule)	rs_gc;
66 
67 	/* Unique ID counter. */
68 	uint64_t		rs_idcnt;
69 
70 	/* Number of array slots and active rules. */
71 	u_int			rs_slots;
72 	u_int			rs_nitems;
73 
74 	/* Array of ordered rules. */
75 	npf_rule_t *		rs_rules[];
76 };
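
/*
 * Note: the ruleset is allocated as a single block; the header above is
 * followed by an array of rs_slots rule pointers (rs_rules[]), hence the
 * offsetof()-based sizing in npf_ruleset_create() and _destroy().  The
 * rs_all list links every rule in the set, rs_dynamic links only the
 * dynamic (named) group rules, and rs_gc collects unlinked rules until
 * npf_ruleset_gc() destroys them.
 */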
77 
78 struct npf_rule {
79 	/* Attributes, interface and skip slot. */
80 	uint32_t		r_attr;
81 	u_int			r_ifid;
82 	u_int			r_skip_to;
83 
84 	/* Code to process, if any. */
85 	int			r_type;
86 	bpfjit_func_t		r_jcode;
87 	void *			r_code;
88 	u_int			r_clen;
89 
90 	/* NAT policy (optional), rule procedure and subset. */
91 	npf_natpolicy_t *	r_natp;
92 	npf_rproc_t *		r_rproc;
93 
94 	union {
95 		/*
96 		 * Dynamic group: rule subset and a group list entry.
97 		 */
98 		struct {
99 			npf_rule_t *		r_subset;
100 			LIST_ENTRY(npf_rule)	r_dentry;
101 		};
102 
103 		/*
104 		 * Dynamic rule: priority, parent group and next rule.
105 		 */
106 		struct {
107 			int			r_priority;
108 			npf_rule_t *		r_parent;
109 			npf_rule_t *		r_next;
110 		};
111 	};
112 
113 	/* Rule ID, name and the optional key. */
114 	uint64_t		r_id;
115 	char			r_name[NPF_RULE_MAXNAMELEN];
116 	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
117 
118 	/* All-list entry and the auxiliary info. */
119 	LIST_ENTRY(npf_rule)	r_aentry;
120 	prop_data_t		r_info;
121 };
122 
123 #define	SKIPTO_ADJ_FLAG		(1U << 31)
124 #define	SKIPTO_MASK		(SKIPTO_ADJ_FLAG - 1)
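
/*
 * The skip-to value packs two things into 32 bits: the lower bits
 * (SKIPTO_MASK) hold the index of the rule to continue at on a non-match,
 * while the top bit (SKIPTO_ADJ_FLAG) marks values which were adjusted
 * automatically by npf_ruleset_insert() and therefore are not exported
 * back to userland (see npf_rule_export()).
 */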
125 
126 static int	npf_rule_export(npf_t *, const npf_ruleset_t *,
127     const npf_rule_t *, prop_dictionary_t);
128 
129 /*
130  * Private attributes - must be in the NPF_RULE_PRIVMASK range.
131  */
132 #define	NPF_RULE_KEEPNAT	(0x01000000 & NPF_RULE_PRIVMASK)
133 
134 #define	NPF_DYNAMIC_GROUP_P(attr) \
135     (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)
136 
137 #define	NPF_DYNAMIC_RULE_P(attr) \
138     (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
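
/*
 * NPF_DYNAMIC_GROUP_P() is true when all NPF_DYNAMIC_GROUP bits are set,
 * i.e. the rule is a dynamic (named) group.  NPF_DYNAMIC_RULE_P() is true
 * when, of those bits, only NPF_RULE_DYNAMIC is set, i.e. the rule is a
 * member of such a group rather than a group itself.
 */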
139 
140 npf_ruleset_t *
141 npf_ruleset_create(size_t slots)
142 {
143 	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
144 	npf_ruleset_t *rlset;
145 
146 	rlset = kmem_zalloc(len, KM_SLEEP);
147 	LIST_INIT(&rlset->rs_dynamic);
148 	LIST_INIT(&rlset->rs_all);
149 	LIST_INIT(&rlset->rs_gc);
150 	rlset->rs_slots = slots;
151 
152 	return rlset;
153 }
154 
155 void
156 npf_ruleset_destroy(npf_ruleset_t *rlset)
157 {
158 	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
159 	npf_rule_t *rl;
160 
161 	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
162 		if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
163 			/*
164 			 * Note: r_subset may point to the rules which
165 			 * were inherited by a new ruleset.
166 			 */
167 			rl->r_subset = NULL;
168 			LIST_REMOVE(rl, r_dentry);
169 		}
170 		if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
171 			/* Not removing from r_subset, see above. */
172 			KASSERT(rl->r_parent != NULL);
173 		}
174 		LIST_REMOVE(rl, r_aentry);
175 		npf_rule_free(rl);
176 	}
177 	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
178 
179 	npf_ruleset_gc(rlset);
180 	KASSERT(LIST_EMPTY(&rlset->rs_gc));
181 	kmem_free(rlset, len);
182 }
183 
184 /*
185  * npf_ruleset_insert: insert the rule into the specified ruleset.
186  */
187 void
188 npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
189 {
190 	u_int n = rlset->rs_nitems;
191 
192 	KASSERT(n < rlset->rs_slots);
193 
194 	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
195 	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
196 		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
197 	} else {
198 		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
199 		rl->r_attr &= ~NPF_RULE_DYNAMIC;
200 	}
201 
202 	rlset->rs_rules[n] = rl;
203 	rlset->rs_nitems++;
204 
205 	if (rl->r_skip_to < ++n) {
206 		rl->r_skip_to = SKIPTO_ADJ_FLAG | n;
207 	}
208 }
209 
210 static npf_rule_t *
211 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
212 {
213 	npf_rule_t *rl;
214 
215 	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
216 		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
217 		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
218 			break;
219 	}
220 	return rl;
221 }
222 
223 /*
224  * npf_ruleset_add: insert dynamic rule into the (active) ruleset.
225  */
226 int
227 npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
228 {
229 	npf_rule_t *rg, *it, *target;
230 	int priocmd;
231 
232 	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
233 		return EINVAL;
234 	}
235 	rg = npf_ruleset_lookup(rlset, rname);
236 	if (rg == NULL) {
237 		return ESRCH;
238 	}
239 
240 	/* Dynamic rule - assign a unique ID and save the parent. */
241 	rl->r_id = ++rlset->rs_idcnt;
242 	rl->r_parent = rg;
243 
244 	/*
245 	 * Rule priority: (highest) 1, 2 ... n (lowest).
246 	 * Negative priority indicates an operation and is reset to zero.
247 	 */
248 	if ((priocmd = rl->r_priority) < 0) {
249 		rl->r_priority = 0;
250 	}
251 
252 	/*
253 	 * WARNING: once rg->r_subset or target->r_next of an *active*
254 	 * rule is set, our rule becomes globally visible and active.
255 	 * Issue a producer (store) barrier so rl->r_next is visible first.
256 	 */
257 	switch (priocmd) {
258 	case NPF_PRI_LAST:
259 	default:
260 		target = NULL;
261 		it = rg->r_subset;
262 		while (it && it->r_priority <= rl->r_priority) {
263 			target = it;
264 			it = it->r_next;
265 		}
266 		if (target) {
267 			rl->r_next = target->r_next;
268 			membar_producer();
269 			target->r_next = rl;
270 			break;
271 		}
272 		/* FALLTHROUGH */
273 
274 	case NPF_PRI_FIRST:
275 		rl->r_next = rg->r_subset;
276 		membar_producer();
277 		rg->r_subset = rl;
278 		break;
279 	}
280 
281 	/* Finally, add into the all-list. */
282 	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
283 	return 0;
284 }
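
/*
 * Example of the ordered insert above: with a subset holding rules of
 * priorities 1, 1 and 3, adding a rule of priority 2 walks past the two
 * priority-1 entries, stops at the priority-3 entry and links the new
 * rule in between (resulting order: 1, 1, 2, 3).  The negative "priority"
 * commands are handled separately: NPF_PRI_FIRST prepends the rule to the
 * head of the subset, while NPF_PRI_LAST goes through the same ordered
 * insert with the priority reset to zero.
 */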
285 
286 static void
287 npf_ruleset_unlink(npf_rule_t *rl, npf_rule_t *prev)
288 {
289 	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
290 	if (prev) {
291 		prev->r_next = rl->r_next;
292 	} else {
293 		npf_rule_t *rg = rl->r_parent;
294 		rg->r_subset = rl->r_next;
295 	}
296 	LIST_REMOVE(rl, r_aentry);
297 }
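
/*
 * Note: the unlinked rule is not destroyed here.  The callers move it
 * onto the rs_gc list and defer the destruction to npf_ruleset_gc(),
 * since lookups on the active ruleset may still be traversing its
 * r_next chain.
 */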
298 
299 /*
300  * npf_ruleset_remove: remove the dynamic rule given the rule ID.
301  */
302 int
303 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
304 {
305 	npf_rule_t *rg, *prev = NULL;
306 
307 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
308 		return ESRCH;
309 	}
310 	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
311 		KASSERT(rl->r_parent == rg);
312 		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
313 
314 		/* Compare ID.  On match, remove and return. */
315 		if (rl->r_id == id) {
316 			npf_ruleset_unlink(rl, prev);
317 			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
318 			return 0;
319 		}
320 		prev = rl;
321 	}
322 	return ENOENT;
323 }
324 
325 /*
326  * npf_ruleset_remkey: remove the dynamic rule given the rule key.
327  */
328 int
329 npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
330     const void *key, size_t len)
331 {
332 	npf_rule_t *rg, *rlast = NULL, *prev = NULL, *lastprev = NULL;
333 
334 	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);
335 
336 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
337 		return ESRCH;
338 	}
339 
340 	/* Compare the key and remember the last match in the list. */
341 	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
342 		KASSERT(rl->r_parent == rg);
343 		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
344 		if (memcmp(rl->r_key, key, len) == 0) {
345 			lastprev = prev;
346 			rlast = rl;
347 		}
348 		prev = rl;
349 	}
350 	if (!rlast) {
351 		return ENOENT;
352 	}
353 	npf_ruleset_unlink(rlast, lastprev);
354 	LIST_INSERT_HEAD(&rlset->rs_gc, rlast, r_aentry);
355 	return 0;
356 }
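
/*
 * Note: if several rules in the group share the same key, the one which
 * appears last in the priority-ordered subset is the one removed.
 */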
357 
358 /*
359  * npf_ruleset_list: serialise and return the dynamic rules.
360  */
361 prop_dictionary_t
362 npf_ruleset_list(npf_t *npf, npf_ruleset_t *rlset, const char *rname)
363 {
364 	prop_dictionary_t rgdict;
365 	prop_array_t rules;
366 	npf_rule_t *rg;
367 
368 	KASSERT(npf_config_locked_p(npf));
369 
370 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
371 		return NULL;
372 	}
373 	if ((rgdict = prop_dictionary_create()) == NULL) {
374 		return NULL;
375 	}
376 	if ((rules = prop_array_create()) == NULL) {
377 		prop_object_release(rgdict);
378 		return NULL;
379 	}
380 
381 	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
382 		prop_dictionary_t rldict;
383 
384 		KASSERT(rl->r_parent == rg);
385 		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
386 
387 		rldict = prop_dictionary_create();
388 		if (npf_rule_export(npf, rlset, rl, rldict)) {
389 			prop_object_release(rldict);
390 			prop_object_release(rules);
391 			return NULL;
392 		}
393 		prop_array_add(rules, rldict);
394 		prop_object_release(rldict);
395 	}
396 
397 	if (!prop_dictionary_set(rgdict, "rules", rules)) {
398 		prop_object_release(rgdict);
399 		rgdict = NULL;
400 	}
401 	prop_object_release(rules);
402 	return rgdict;
403 }
404 
405 /*
406  * npf_ruleset_flush: flush the dynamic rules in the ruleset by inserting
407  * them into the G/C list.
408  */
409 int
410 npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
411 {
412 	npf_rule_t *rg, *rl;
413 
414 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
415 		return ESRCH;
416 	}
417 
418 	rl = atomic_swap_ptr(&rg->r_subset, NULL);
419 	membar_producer();
420 
421 	while (rl) {
422 		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
423 		KASSERT(rl->r_parent == rg);
424 
425 		LIST_REMOVE(rl, r_aentry);
426 		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
427 		rl = rl->r_next;
428 	}
429 	return 0;
430 }
431 
432 /*
433  * npf_ruleset_gc: destroy the rules in G/C list.
434  */
435 void
436 npf_ruleset_gc(npf_ruleset_t *rlset)
437 {
438 	npf_rule_t *rl;
439 
440 	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
441 		LIST_REMOVE(rl, r_aentry);
442 		npf_rule_free(rl);
443 	}
444 }
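
/*
 * This should only run once no lookups can reference the rules being
 * destroyed, e.g. with the config lock held after the updated ruleset
 * has become the active one (an assumption based on how the G/C list
 * is used above, not an API guarantee).
 */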
445 
446 /*
447  * npf_ruleset_export: serialise and return the static rules.
448  */
449 int
450 npf_ruleset_export(npf_t *npf, const npf_ruleset_t *rlset, prop_array_t rules)
451 {
452 	const u_int nitems = rlset->rs_nitems;
453 	int error = 0;
454 	u_int n = 0;
455 
456 	KASSERT(npf_config_locked_p(npf));
457 
458 	while (n < nitems) {
459 		const npf_rule_t *rl = rlset->rs_rules[n];
460 		const npf_natpolicy_t *natp = rl->r_natp;
461 		prop_dictionary_t rldict;
462 
463 		rldict = prop_dictionary_create();
464 		if ((error = npf_rule_export(npf, rlset, rl, rldict)) != 0) {
465 			prop_object_release(rldict);
466 			break;
467 		}
468 		if (natp && (error = npf_nat_policyexport(natp, rldict)) != 0) {
469 			prop_object_release(rldict);
470 			break;
471 		}
472 		prop_array_add(rules, rldict);
473 		prop_object_release(rldict);
474 		n++;
475 	}
476 	return error;
477 }
478 
479 /*
480  * npf_ruleset_reload: prepare the new ruleset by scanning the active
481  * ruleset and: 1) sharing the dynamic rules and 2) sharing the NAT policies.
482  *
483  * => The active (old) ruleset should be exclusively locked.
484  */
485 void
486 npf_ruleset_reload(npf_t *npf, npf_ruleset_t *newset,
487     npf_ruleset_t *oldset, bool load)
488 {
489 	npf_rule_t *rg, *rl;
490 	uint64_t nid = 0;
491 
492 	KASSERT(npf_config_locked_p(npf));
493 
494 	/*
495 	 * Scan the dynamic rules and share (migrate) if needed.
496 	 */
497 	LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
498 		npf_rule_t *active_rgroup;
499 
500 		/* Look for a dynamic ruleset group with such name. */
501 		active_rgroup = npf_ruleset_lookup(oldset, rg->r_name);
502 		if (active_rgroup == NULL) {
503 			continue;
504 		}
505 
506 		/*
507 		 * ATOMICITY: Copy the head pointer of the linked-list,
508 		 * but do not remove the rules from the active r_subset.
509 		 * This is necessary because the rules are still active
510 		 * and therefore are accessible for inspection via the
511 		 * old ruleset.
512 		 */
513 		rg->r_subset = active_rgroup->r_subset;
514 
515 		/*
516 		 * We can safely migrate to the new all-rule list and
517 		 * reset the parent rule, though.
518 		 */
519 		for (rl = rg->r_subset; rl; rl = rl->r_next) {
520 			KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
521 			LIST_REMOVE(rl, r_aentry);
522 			LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);
523 
524 			KASSERT(rl->r_parent == active_rgroup);
525 			rl->r_parent = rg;
526 		}
527 	}
528 
529 	/*
530 	 * If the connections are being loaded as well, then the NAT
531 	 * policies may already have translated connections associated
532 	 * with them, so we should not share or inherit anything.
533 	 */
534 	if (load)
535 		return;
536 
537 	/*
538 	 * Scan all rules in the new ruleset and share NAT policies.
539 	 * Also, assign a unique ID for each policy here.
540 	 */
541 	LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
542 		npf_natpolicy_t *np;
543 		npf_rule_t *actrl;
544 
545 		/* Does the rule have a NAT policy associated? */
546 		if ((np = rl->r_natp) == NULL) {
547 			continue;
548 		}
549 
550 		/*
551 		 * First, try to share the active port map.  If this
552 		 * policy will be unused, npf_nat_freepolicy() will
553 		 * drop the reference.
554 		 */
555 		npf_ruleset_sharepm(oldset, np);
556 
557 		/* Does it match with any policy in the active ruleset? */
558 		LIST_FOREACH(actrl, &oldset->rs_all, r_aentry) {
559 			if (!actrl->r_natp)
560 				continue;
561 			if ((actrl->r_attr & NPF_RULE_KEEPNAT) != 0)
562 				continue;
563 			if (npf_nat_cmppolicy(actrl->r_natp, np))
564 				break;
565 		}
566 		if (!actrl) {
567 			/* No: just set the ID and continue. */
568 			npf_nat_setid(np, ++nid);
569 			continue;
570 		}
571 
572 		/* Yes: inherit the matching NAT policy. */
573 		rl->r_natp = actrl->r_natp;
574 		npf_nat_setid(rl->r_natp, ++nid);
575 
576 		/*
577 		 * Finally, mark the active rule to not destroy its NAT
578 		 * policy later as we inherited it (but the rule must be
579 		 * kept active for now).  Destroy the new/unused policy.
580 		 */
581 		actrl->r_attr |= NPF_RULE_KEEPNAT;
582 		npf_nat_freepolicy(np);
583 	}
584 
585 	/* Inherit the ID counter. */
586 	newset->rs_idcnt = oldset->rs_idcnt;
587 }
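
/*
 * A rough sketch of the expected reload sequence as seen from the
 * configuration code (hypothetical pseudo-code, not the actual API):
 *
 *	acquire the exclusive config lock
 *	newset = npf_ruleset_create(nitems);
 *	populate newset from the submitted dictionary (npf_rule_alloc() etc)
 *	npf_ruleset_reload(npf, newset, oldset, load);
 *	publish newset as the active ruleset, then G/C the old one
 *	release the config lock
 */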
588 
589 /*
590  * npf_ruleset_sharepm: attempt to share the active NAT portmap.
591  */
592 npf_rule_t *
593 npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
594 {
595 	npf_natpolicy_t *np;
596 	npf_rule_t *rl;
597 
598 	/*
599 	 * Scan the NAT policies in the ruleset and match with the
600 	 * given policy based on the translation IP address.  If they
601 	 * match - adjust the given NAT policy to use the active NAT
602 	 * portmap.  In such case the reference on the old portmap is
603 	 * dropped and acquired on the active one.
604 	 */
605 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
606 		np = rl->r_natp;
607 		if (np == NULL || np == mnp)
608 			continue;
609 		if (npf_nat_sharepm(np, mnp))
610 			break;
611 	}
612 	return rl;
613 }
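
/*
 * Note: returns the rule whose active NAT policy now shares its portmap
 * with the given policy, or NULL if no suitable policy was found.
 */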
614 
615 npf_natpolicy_t *
616 npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
617 {
618 	npf_rule_t *rl;
619 
620 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
621 		npf_natpolicy_t *np = rl->r_natp;
622 		if (np && npf_nat_getid(np) == id) {
623 			return np;
624 		}
625 	}
626 	return NULL;
627 }
628 
629 /*
630  * npf_ruleset_freealg: inspect the ruleset and disassociate specified
631  * ALG from all NAT entries using it.
632  */
633 void
634 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
635 {
636 	npf_rule_t *rl;
637 	npf_natpolicy_t *np;
638 
639 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
640 		if ((np = rl->r_natp) != NULL) {
641 			npf_nat_freealg(np, alg);
642 		}
643 	}
644 }
645 
646 /*
647  * npf_rule_alloc: allocate a rule and initialise it.
648  */
649 npf_rule_t *
650 npf_rule_alloc(npf_t *npf, prop_dictionary_t rldict)
651 {
652 	npf_rule_t *rl;
653 	const char *rname;
654 	prop_data_t d;
655 
656 	/* Allocate a rule structure. */
657 	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
658 	rl->r_natp = NULL;
659 
660 	/* Name (optional) */
661 	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
662 		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
663 	} else {
664 		rl->r_name[0] = '\0';
665 	}
666 
667 	/* Attributes, priority and interface ID (optional). */
668 	prop_dictionary_get_uint32(rldict, "attr", &rl->r_attr);
669 	rl->r_attr &= ~NPF_RULE_PRIVMASK;
670 
671 	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
672 		/* Priority of the dynamic rule. */
673 		prop_dictionary_get_int32(rldict, "prio", &rl->r_priority);
674 	} else {
675 		/* The skip-to index.  No need to validate it. */
676 		prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);
677 	}
678 
679 	/* Interface name; register and get the npf-if-id. */
680 	if (prop_dictionary_get_cstring_nocopy(rldict, "ifname", &rname)) {
681 		if ((rl->r_ifid = npf_ifmap_register(npf, rname)) == 0) {
682 			kmem_free(rl, sizeof(npf_rule_t));
683 			return NULL;
684 		}
685 	} else {
686 		rl->r_ifid = 0;
687 	}
688 
689 	/* Key (optional). */
690 	prop_object_t obj = prop_dictionary_get(rldict, "key");
691 	const void *key = prop_data_data_nocopy(obj);
692 
693 	if (key) {
694 		size_t len = prop_data_size(obj);
695 		if (len > NPF_RULE_MAXKEYLEN) {
696 			kmem_free(rl, sizeof(npf_rule_t));
697 			return NULL;
698 		}
699 		memcpy(rl->r_key, key, len);
700 	}
701 
702 	if ((d = prop_dictionary_get(rldict, "info")) != NULL) {
703 		rl->r_info = prop_data_copy(d);
704 	}
705 	return rl;
706 }
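
/*
 * Illustrative userland-side sketch (not part of this module) of a
 * minimal dictionary npf_rule_alloc() can consume; only the keys read
 * above -- "name", "attr", "prio" or "skip-to", "ifname", "key" and
 * "info" -- are meaningful:
 *
 *	prop_dictionary_t rldict = prop_dictionary_create();
 *	prop_dictionary_set_cstring(rldict, "name", "example-rule");
 *	prop_dictionary_set_uint32(rldict, "attr",
 *	    NPF_RULE_PASS | NPF_RULE_IN | NPF_RULE_DYNAMIC);
 *	prop_dictionary_set_int32(rldict, "prio", NPF_PRI_LAST);
 */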
707 
708 static int
709 npf_rule_export(npf_t *npf, const npf_ruleset_t *rlset,
710     const npf_rule_t *rl, prop_dictionary_t rldict)
711 {
712 	u_int skip_to = 0;
713 	prop_data_t d;
714 
715 	prop_dictionary_set_uint32(rldict, "attr", rl->r_attr);
716 	prop_dictionary_set_int32(rldict, "prio", rl->r_priority);
717 	if ((rl->r_skip_to & SKIPTO_ADJ_FLAG) == 0) {
718 		skip_to = rl->r_skip_to & SKIPTO_MASK;
719 	}
720 	prop_dictionary_set_uint32(rldict, "skip-to", skip_to);
721 	prop_dictionary_set_int32(rldict, "code-type", rl->r_type);
722 	if (rl->r_code) {
723 		d = prop_data_create_data(rl->r_code, rl->r_clen);
724 		prop_dictionary_set_and_rel(rldict, "code", d);
725 	}
726 
727 	if (rl->r_ifid) {
728 		const char *ifname = npf_ifmap_getname(npf, rl->r_ifid);
729 		prop_dictionary_set_cstring(rldict, "ifname", ifname);
730 	}
731 	prop_dictionary_set_uint64(rldict, "id", rl->r_id);
732 
733 	if (rl->r_name[0]) {
734 		prop_dictionary_set_cstring(rldict, "name", rl->r_name);
735 	}
736 	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
737 		d = prop_data_create_data(rl->r_key, NPF_RULE_MAXKEYLEN);
738 		prop_dictionary_set_and_rel(rldict, "key", d);
739 	}
740 	if (rl->r_info) {
741 		prop_dictionary_set(rldict, "info", rl->r_info);
742 	}
743 
744 	npf_rproc_t *rp = npf_rule_getrproc(rl);
745 	if (rp != NULL) {
746 		prop_dictionary_set_cstring(rldict, "rproc",
747 		    npf_rproc_getname(rp));
748 		npf_rproc_release(rp);
749 	}
750 
751 	return 0;
752 }
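
/*
 * Note: this is the inverse of npf_rule_alloc(), plus the code and the
 * rule procedure name; skip-to values which were auto-adjusted on insert
 * (SKIPTO_ADJ_FLAG) are deliberately exported as zero.
 */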
753 
754 /*
755  * npf_rule_setcode: assign filter code to the rule.
756  *
757  * => The code must be validated by the caller.
758  * => JIT compilation may be performed here.
759  */
760 void
761 npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
762 {
763 	KASSERT(type == NPF_CODE_BPF);
764 
765 	rl->r_type = type;
766 	rl->r_code = code;
767 	rl->r_clen = size;
768 	rl->r_jcode = npf_bpf_compile(code, size);
769 }
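
/*
 * Note: npf_bpf_compile() may return NULL (e.g. when the BPF JIT is not
 * available), in which case npf_rule_inspect() falls back to interpreting
 * r_code via npf_bpf_filter().
 */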
770 
771 /*
772  * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
773  */
774 void
775 npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
776 {
777 	npf_rproc_acquire(rp);
778 	rl->r_rproc = rp;
779 }
780 
781 /*
782  * npf_rule_free: free the specified rule.
783  */
784 void
785 npf_rule_free(npf_rule_t *rl)
786 {
787 	npf_natpolicy_t *np = rl->r_natp;
788 	npf_rproc_t *rp = rl->r_rproc;
789 
790 	if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
791 		/* Free NAT policy. */
792 		npf_nat_freepolicy(np);
793 	}
794 	if (rp) {
795 		/* Release rule procedure. */
796 		npf_rproc_release(rp);
797 	}
798 	if (rl->r_code) {
799 		/* Free byte-code. */
800 		kmem_free(rl->r_code, rl->r_clen);
801 	}
802 	if (rl->r_jcode) {
803 		/* Free JIT code. */
804 		bpf_jit_freecode(rl->r_jcode);
805 	}
806 	if (rl->r_info) {
807 		prop_object_release(rl->r_info);
808 	}
809 	kmem_free(rl, sizeof(npf_rule_t));
810 }
811 
812 /*
813  * npf_rule_getid: return the unique ID of a rule.
814  * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
815  * npf_rule_getnat: get NAT policy assigned to the rule.
816  */
817 
818 uint64_t
819 npf_rule_getid(const npf_rule_t *rl)
820 {
821 	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
822 	return rl->r_id;
823 }
824 
825 npf_rproc_t *
826 npf_rule_getrproc(const npf_rule_t *rl)
827 {
828 	npf_rproc_t *rp = rl->r_rproc;
829 
830 	if (rp) {
831 		npf_rproc_acquire(rp);
832 	}
833 	return rp;
834 }
835 
836 npf_natpolicy_t *
837 npf_rule_getnat(const npf_rule_t *rl)
838 {
839 	return rl->r_natp;
840 }
841 
842 /*
843  * npf_rule_setnat: associate the NAT policy with the rule.  The rule
844  * must not have a policy assigned yet.
845  */
846 void
847 npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
848 {
849 	KASSERT(rl->r_natp == NULL);
850 	rl->r_natp = np;
851 }
852 
853 /*
854  * npf_rule_inspect: match the interface, direction and run the filter code.
855  * Returns true if the rule matches and false otherwise.
856  */
857 static inline bool
858 npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
859     const int di_mask, const u_int ifid)
860 {
861 	/* Match the interface. */
862 	if (rl->r_ifid && rl->r_ifid != ifid) {
863 		return false;
864 	}
865 
866 	/* Match the direction. */
867 	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
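	/* Note: both bits set (NPF_RULE_DIMASK) means "any direction". */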
868 		if ((rl->r_attr & di_mask) == 0)
869 			return false;
870 	}
871 
872 	/* Any code? */
873 	if (!rl->r_code) {
874 		KASSERT(rl->r_jcode == NULL);
875 		return true;
876 	}
877 	KASSERT(rl->r_type == NPF_CODE_BPF);
878 	return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
879 }
880 
881 /*
882  * npf_rule_reinspect: re-inspect the dynamic group by iterating its subrules.
883  * Only dynamic groups have subrules; the subrules themselves cannot nest.
884  */
885 static inline npf_rule_t *
886 npf_rule_reinspect(const npf_rule_t *rg, bpf_args_t *bc_args,
887     const int di_mask, const u_int ifid)
888 {
889 	npf_rule_t *final_rl = NULL, *rl;
890 
891 	KASSERT(NPF_DYNAMIC_GROUP_P(rg->r_attr));
892 
893 	for (rl = rg->r_subset; rl; rl = rl->r_next) {
894 		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
895 		if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
896 			continue;
897 		}
898 		if (rl->r_attr & NPF_RULE_FINAL) {
899 			return rl;
900 		}
901 		final_rl = rl;
902 	}
903 	return final_rl;
904 }
905 
906 /*
907  * npf_ruleset_inspect: inspect the packet against the given ruleset.
908  *
909  * Loop through the rules in the set and run the byte-code of each rule
910  * against the packet (nbuf chain).  If a sub-ruleset is found, inspect it.
911  */
912 npf_rule_t *
913 npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
914     const int di, const int layer)
915 {
916 	nbuf_t *nbuf = npc->npc_nbuf;
917 	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
918 	const u_int nitems = rlset->rs_nitems;
919 	const u_int ifid = nbuf->nb_ifid;
920 	npf_rule_t *final_rl = NULL;
921 	bpf_args_t bc_args;
922 	u_int n = 0;
923 
924 	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
925 
926 	/*
927 	 * Prepare the external memory store and the arguments for
928 	 * the BPF programs to be executed.  Reset mbuf before taking
929 	 * any pointers for the BPF.
930 	 */
931 	uint32_t bc_words[NPF_BPF_NWORDS];
932 
933 	nbuf_reset(nbuf);
934 	npf_bpf_prepare(npc, &bc_args, bc_words);
935 
936 	while (n < nitems) {
937 		npf_rule_t *rl = rlset->rs_rules[n];
938 		const u_int skip_to = rl->r_skip_to & SKIPTO_MASK;
939 		const uint32_t attr = rl->r_attr;
940 
941 		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
942 		KASSERT(n < skip_to);
943 
944 		/* Group is a barrier: return the matching rule, if one was found. */
945 		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
946 			break;
947 		}
948 
949 		/* Main inspection of the rule. */
950 		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
951 			n = skip_to;
952 			continue;
953 		}
954 
955 		if (NPF_DYNAMIC_GROUP_P(attr)) {
956 			/*
957 			 * If this is a dynamic group, re-inspect its subrules.
958 			 * If any subrule matches, then that match is final.
959 			 */
960 			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
961 			if (rl != NULL) {
962 				final_rl = rl;
963 				break;
964 			}
965 		} else if ((attr & NPF_RULE_GROUP) == 0) {
966 			/*
967 			 * Groups themselves do not match; record this rule.
968 			 */
969 			final_rl = rl;
970 		}
971 
972 		/* Stop if this rule is marked as "final". */
973 		if (attr & NPF_RULE_FINAL) {
974 			break;
975 		}
976 		n++;
977 	}
978 
979 	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
980 	return final_rl;
981 }
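
/*
 * Example of the skip-to behaviour above: if rule n heads a static group
 * whose skip-to is n + 4 and the packet does not match it, the scan
 * continues directly at n + 4, skipping the group members in one step
 * instead of testing each of them.
 */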
982 
983 /*
984  * npf_rule_conclude: return decision and the flags for conclusion.
985  *
986  * => Returns ENETUNREACH if "block" and 0 if "pass".
987  */
988 int
989 npf_rule_conclude(const npf_rule_t *rl, int *retfl)
990 {
991 	/* If not passing - drop the packet. */
992 	*retfl = rl->r_attr;
993 	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
994 }
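
/*
 * The rule attributes are handed back via *retfl so that the caller can
 * act on additional flags, e.g. whether to respond to a blocked packet
 * with a TCP RST or an ICMP error.
 */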
995 
996 
997 #if defined(DDB) || defined(_NPF_TESTING)
998 
999 void
1000 npf_ruleset_dump(npf_t *npf, const char *name)
1001 {
1002 	npf_ruleset_t *rlset = npf_config_ruleset(npf);
1003 	npf_rule_t *rg, *rl;
1004 
1005 	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
1006 		printf("ruleset '%s':\n", rg->r_name);
1007 		for (rl = rg->r_subset; rl; rl = rl->r_next) {
1008 			printf("\tid %"PRIu64", key: ", rl->r_id);
1009 			for (u_int i = 0; i < NPF_RULE_MAXKEYLEN; i++)
1010 				printf("%x", rl->r_key[i]);
1011 			printf("\n");
1012 		}
1013 	}
1014 }
1015 
1016 #endif
1017