1 /*	$NetBSD: npf_ruleset.c,v 1.46 2017/12/10 01:18:21 rmind Exp $	*/
2 
3 /*-
4  * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This material is based upon work partially supported by The
8  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * NPF ruleset module.
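 *
 * Typical usage (an informal sketch; the actual call sites live in the
 * NPF configuration and control code, e.g. npf_conf.c and npf_ctl.c):
 *
 *	npf_ruleset_t *rs = npf_ruleset_create(nslots);
 *	npf_ruleset_insert(rs, rl);	<- for each static rule, in order
 *	...
 *	rl = npf_ruleset_inspect(npc, rs, di, layer);
 *	if (rl != NULL)
 *		error = npf_rule_conclude(rl, &mi);
 *	...
 *	npf_ruleset_destroy(rs);
 *
 * Dynamic (named) groups additionally support npf_ruleset_add(),
 * npf_ruleset_remove(), npf_ruleset_remkey() and npf_ruleset_flush();
 * the removed rules are reclaimed later by npf_ruleset_gc().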
34  */
35 
36 #ifdef _KERNEL
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.46 2017/12/10 01:18:21 rmind Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/types.h>
42 
43 #include <sys/atomic.h>
44 #include <sys/kmem.h>
45 #include <sys/queue.h>
46 #include <sys/mbuf.h>
48 
49 #include <net/bpf.h>
50 #include <net/bpfjit.h>
51 #include <net/pfil.h>
52 #include <net/if.h>
53 #endif
54 
55 #include "npf_impl.h"
56 
57 struct npf_ruleset {
58 	/*
59 	 * - List of all rules.
60 	 * - Dynamic (i.e. named) rules.
61 	 * - G/C list for convenience.
62 	 */
63 	LIST_HEAD(, npf_rule)	rs_all;
64 	LIST_HEAD(, npf_rule)	rs_dynamic;
65 	LIST_HEAD(, npf_rule)	rs_gc;
66 
67 	/* Unique ID counter. */
68 	uint64_t		rs_idcnt;
69 
70 	/* Number of array slots and active rules. */
71 	u_int			rs_slots;
72 	u_int			rs_nitems;
73 
74 	/* Array of ordered rules. */
75 	npf_rule_t *		rs_rules[];
76 };
77 
78 struct npf_rule {
79 	/* Attributes, interface and skip slot. */
80 	uint32_t		r_attr;
81 	u_int			r_ifid;
82 	u_int			r_skip_to;
83 
84 	/* Code to process, if any. */
85 	int			r_type;
86 	bpfjit_func_t		r_jcode;
87 	void *			r_code;
88 	u_int			r_clen;
89 
90 	/* NAT policy (optional), rule procedure and subset. */
91 	npf_natpolicy_t *	r_natp;
92 	npf_rproc_t *		r_rproc;
93 
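	/*
	 * The union below overlays the dynamic-group fields (r_subset,
	 * r_dentry) with the dynamic-rule fields (r_priority, r_parent,
	 * r_next); which half is valid is determined by the attributes
	 * (see the NPF_DYNAMIC_GROUP_P and NPF_DYNAMIC_RULE_P predicates).
	 */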
94 	union {
95 		/*
96 		 * Dynamic group: rule subset and a group list entry.
97 		 */
98 		struct {
99 			npf_rule_t *		r_subset;
100 			LIST_ENTRY(npf_rule)	r_dentry;
101 		};
102 
103 		/*
104 		 * Dynamic rule: priority, parent group and next rule.
105 		 */
106 		struct {
107 			int			r_priority;
108 			npf_rule_t *		r_parent;
109 			npf_rule_t *		r_next;
110 		};
111 	};
112 
113 	/* Rule ID, name and the optional key. */
114 	uint64_t		r_id;
115 	char			r_name[NPF_RULE_MAXNAMELEN];
116 	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
117 
118 	/* All-list entry and the auxiliary info. */
119 	LIST_ENTRY(npf_rule)	r_aentry;
120 	prop_data_t		r_info;
121 };
122 
123 #define	SKIPTO_ADJ_FLAG		(1U << 31)
124 #define	SKIPTO_MASK		(SKIPTO_ADJ_FLAG - 1)
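
/*
 * The skip-to value in r_skip_to is an index into rs_rules[].  A value
 * tagged with SKIPTO_ADJ_FLAG was auto-assigned by npf_ruleset_insert()
 * (i.e. "skip to the next rule") and is reported as zero on export;
 * SKIPTO_MASK extracts the index.  For example, a rule at index 2 with
 * a skip-to of 5 bypasses the rules at indices 3 and 4 when it does
 * not match.
 */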
125 
126 static int	npf_rule_export(npf_t *, const npf_ruleset_t *,
127     const npf_rule_t *, prop_dictionary_t);
128 
129 /*
130  * Private attributes - must be in the NPF_RULE_PRIVMASK range.
131  */
132 #define	NPF_RULE_KEEPNAT	(0x01000000 & NPF_RULE_PRIVMASK)
133 
134 #define	NPF_DYNAMIC_GROUP_P(attr) \
135     (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)
136 
137 #define	NPF_DYNAMIC_RULE_P(attr) \
138     (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
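
/*
 * NPF_DYNAMIC_GROUP (defined in npf.h) combines NPF_RULE_GROUP and
 * NPF_RULE_DYNAMIC: a dynamic group has both bits set, whereas a dynamic
 * rule carries NPF_RULE_DYNAMIC without the group bit.
 */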
139 
140 npf_ruleset_t *
141 npf_ruleset_create(size_t slots)
142 {
143 	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
144 	npf_ruleset_t *rlset;
145 
146 	rlset = kmem_zalloc(len, KM_SLEEP);
147 	LIST_INIT(&rlset->rs_dynamic);
148 	LIST_INIT(&rlset->rs_all);
149 	LIST_INIT(&rlset->rs_gc);
150 	rlset->rs_slots = slots;
151 
152 	return rlset;
153 }
154 
155 void
156 npf_ruleset_destroy(npf_ruleset_t *rlset)
157 {
158 	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
159 	npf_rule_t *rl;
160 
161 	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
162 		if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
163 			/*
164 			 * Note: r_subset may point to the rules which
165 			 * were inherited by a new ruleset.
166 			 */
167 			rl->r_subset = NULL;
168 			LIST_REMOVE(rl, r_dentry);
169 		}
170 		if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
171 			/* Not removing from r_subset, see above. */
172 			KASSERT(rl->r_parent != NULL);
173 		}
174 		LIST_REMOVE(rl, r_aentry);
175 		npf_rule_free(rl);
176 	}
177 	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
178 
179 	npf_ruleset_gc(rlset);
180 	KASSERT(LIST_EMPTY(&rlset->rs_gc));
181 	kmem_free(rlset, len);
182 }
183 
184 /*
185  * npf_ruleset_insert: insert the rule into the specified ruleset.
186  */
187 void
188 npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
189 {
190 	u_int n = rlset->rs_nitems;
191 
192 	KASSERT(n < rlset->rs_slots);
193 
194 	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
195 	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
196 		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
197 	} else {
198 		KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
199 		rl->r_attr &= ~NPF_RULE_DYNAMIC;
200 	}
201 
202 	rlset->rs_rules[n] = rl;
203 	rlset->rs_nitems++;
204 	rl->r_id = ++rlset->rs_idcnt;
205 
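	/*
	 * If no forward skip-to was supplied, default it to the next rule
	 * and tag the value with SKIPTO_ADJ_FLAG, so that the auto-assigned
	 * value is not exported back to the user.
	 */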
206 	if (rl->r_skip_to < ++n) {
207 		rl->r_skip_to = SKIPTO_ADJ_FLAG | n;
208 	}
209 }
210 
211 npf_rule_t *
212 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
213 {
214 	npf_rule_t *rl;
215 
216 	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
217 		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
218 		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
219 			break;
220 	}
221 	return rl;
222 }
223 
224 /*
225  * npf_ruleset_add: insert a dynamic rule into the (active) ruleset.
226  */
227 int
228 npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
229 {
230 	npf_rule_t *rg, *it, *target;
231 	int priocmd;
232 
233 	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
234 		return EINVAL;
235 	}
236 	rg = npf_ruleset_lookup(rlset, rname);
237 	if (rg == NULL) {
238 		return ESRCH;
239 	}
240 
241 	/* Dynamic rule - assign a unique ID and save the parent. */
242 	rl->r_id = ++rlset->rs_idcnt;
243 	rl->r_parent = rg;
244 
245 	/*
246 	 * Rule priority: (highest) 1, 2 ... n (lowest).
247 	 * Negative priority indicates an operation and is reset to zero.
248 	 */
249 	if ((priocmd = rl->r_priority) < 0) {
250 		rl->r_priority = 0;
251 	}
252 
253 	/*
254 	 * WARNING: once rg->r_subset or target->r_next of an *active*
255 	 * rule is set, then our rule becomes globally visible and active.
256 	 * Issue a store (producer) fence first to make rl->r_next visible.
257 	 */
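
	/*
	 * Ordering: the subset is kept sorted by ascending priority value;
	 * among rules of equal priority the new rule is appended after the
	 * existing ones (FIFO).  NPF_PRI_FIRST inserts at the head (the
	 * priority was reset to zero above).
	 */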
258 	switch (priocmd) {
259 	case NPF_PRI_LAST:
260 	default:
261 		target = NULL;
262 		it = rg->r_subset;
263 		while (it && it->r_priority <= rl->r_priority) {
264 			target = it;
265 			it = it->r_next;
266 		}
267 		if (target) {
268 			rl->r_next = target->r_next;
269 			membar_producer();
270 			target->r_next = rl;
271 			break;
272 		}
273 		/* FALLTHROUGH */
274 
275 	case NPF_PRI_FIRST:
276 		rl->r_next = rg->r_subset;
277 		membar_producer();
278 		rg->r_subset = rl;
279 		break;
280 	}
281 
282 	/* Finally, add into the all-list. */
283 	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
284 	return 0;
285 }
286 
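/*
 * npf_ruleset_unlink: unlink the dynamic rule from its parent group
 * and from the all-rules list.  The rule is not destroyed here: the
 * callers move it onto the G/C list, and it is freed by npf_ruleset_gc()
 * once it can no longer be reached by the packet inspection path.
 */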
287 static void
288 npf_ruleset_unlink(npf_rule_t *rl, npf_rule_t *prev)
289 {
290 	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
291 	if (prev) {
292 		prev->r_next = rl->r_next;
293 	} else {
294 		npf_rule_t *rg = rl->r_parent;
295 		rg->r_subset = rl->r_next;
296 	}
297 	LIST_REMOVE(rl, r_aentry);
298 }
299 
300 /*
301  * npf_ruleset_remove: remove the dynamic rule given the rule ID.
302  */
303 int
304 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
305 {
306 	npf_rule_t *rg, *prev = NULL;
307 
308 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
309 		return ESRCH;
310 	}
311 	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
312 		KASSERT(rl->r_parent == rg);
313 		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
314 
315 		/* Compare ID.  On match, remove and return. */
316 		if (rl->r_id == id) {
317 			npf_ruleset_unlink(rl, prev);
318 			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
319 			return 0;
320 		}
321 		prev = rl;
322 	}
323 	return ENOENT;
324 }
325 
326 /*
327  * npf_ruleset_remkey: remove the dynamic rule given the rule key.
328  */
329 int
330 npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
331     const void *key, size_t len)
332 {
333 	npf_rule_t *rg, *rlast = NULL, *prev = NULL, *lastprev = NULL;
334 
335 	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);
336 
337 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
338 		return ESRCH;
339 	}
340 
341 	/* Compare the key and find the last in the list. */
342 	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
343 		KASSERT(rl->r_parent == rg);
344 		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
345 		if (memcmp(rl->r_key, key, len) == 0) {
346 			lastprev = prev;
347 			rlast = rl;
348 		}
349 		prev = rl;
350 	}
351 	if (!rlast) {
352 		return ENOENT;
353 	}
354 	npf_ruleset_unlink(rlast, lastprev);
355 	LIST_INSERT_HEAD(&rlset->rs_gc, rlast, r_aentry);
356 	return 0;
357 }
358 
359 /*
360  * npf_ruleset_list: serialise and return the dynamic rules.
361  */
362 prop_dictionary_t
363 npf_ruleset_list(npf_t *npf, npf_ruleset_t *rlset, const char *rname)
364 {
365 	prop_dictionary_t rgdict;
366 	prop_array_t rules;
367 	npf_rule_t *rg;
368 
369 	KASSERT(npf_config_locked_p(npf));
370 
371 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
372 		return NULL;
373 	}
374 	if ((rgdict = prop_dictionary_create()) == NULL) {
375 		return NULL;
376 	}
377 	if ((rules = prop_array_create()) == NULL) {
378 		prop_object_release(rgdict);
379 		return NULL;
380 	}
381 
382 	for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
383 		prop_dictionary_t rldict;
384 
385 		KASSERT(rl->r_parent == rg);
386 		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
387 
388 		rldict = prop_dictionary_create();
389 		if (npf_rule_export(npf, rlset, rl, rldict)) {
390 			prop_object_release(rldict);
391 			prop_object_release(rules);
392 			return NULL;
393 		}
394 		prop_array_add(rules, rldict);
395 		prop_object_release(rldict);
396 	}
397 
398 	if (!prop_dictionary_set(rgdict, "rules", rules)) {
399 		prop_object_release(rgdict);
400 		rgdict = NULL;
401 	}
402 	prop_object_release(rules);
403 	return rgdict;
404 }
405 
406 /*
407  * npf_ruleset_flush: flush the dynamic rules in the ruleset by inserting
408  * them into the G/C list.
409  */
410 int
411 npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
412 {
413 	npf_rule_t *rg, *rl;
414 
415 	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
416 		return ESRCH;
417 	}
418 
419 	rl = atomic_swap_ptr(&rg->r_subset, NULL);
420 	membar_producer();
421 
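	/*
	 * The detached chain stays linked via r_next, so any lookup that
	 * already entered the old subset can finish its traversal; the
	 * rules are only destroyed later by npf_ruleset_gc().
	 */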
422 	while (rl) {
423 		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
424 		KASSERT(rl->r_parent == rg);
425 
426 		LIST_REMOVE(rl, r_aentry);
427 		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
428 		rl = rl->r_next;
429 	}
430 	rlset->rs_idcnt = 0;
431 	return 0;
432 }
433 
434 /*
435  * npf_ruleset_gc: destroy the rules in G/C list.
436  */
437 void
438 npf_ruleset_gc(npf_ruleset_t *rlset)
439 {
440 	npf_rule_t *rl;
441 
442 	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
443 		LIST_REMOVE(rl, r_aentry);
444 		npf_rule_free(rl);
445 	}
446 }
447 
448 /*
449  * npf_ruleset_export: serialise and return the static rules.
450  */
451 int
452 npf_ruleset_export(npf_t *npf, const npf_ruleset_t *rlset, prop_array_t rules)
453 {
454 	const u_int nitems = rlset->rs_nitems;
455 	int error = 0;
456 	u_int n = 0;
457 
458 	KASSERT(npf_config_locked_p(npf));
459 
460 	while (n < nitems) {
461 		const npf_rule_t *rl = rlset->rs_rules[n];
462 		const npf_natpolicy_t *natp = rl->r_natp;
463 		prop_dictionary_t rldict;
464 
465 		rldict = prop_dictionary_create();
466 		if ((error = npf_rule_export(npf, rlset, rl, rldict)) != 0) {
467 			prop_object_release(rldict);
468 			break;
469 		}
470 		if (natp && (error = npf_nat_policyexport(natp, rldict)) != 0) {
471 			prop_object_release(rldict);
472 			break;
473 		}
474 		prop_array_add(rules, rldict);
475 		prop_object_release(rldict);
476 		n++;
477 	}
478 	return error;
479 }
480 
481 /*
482  * npf_ruleset_reload: prepare the new ruleset by scanning the active
483  * ruleset and: 1) sharing the dynamic rules; 2) sharing the NAT policies.
484  *
485  * => The active (old) ruleset should be exclusively locked.
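 * => Called when a new configuration is loaded: the new ruleset takes
 *    over the dynamic rules and, unless the connections are being
 *    loaded (load == true), re-uses the matching NAT policies so that
 *    the existing translations and portmaps are preserved.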
486  */
487 void
488 npf_ruleset_reload(npf_t *npf, npf_ruleset_t *newset,
489     npf_ruleset_t *oldset, bool load)
490 {
491 	npf_rule_t *rg, *rl;
492 	uint64_t nid = 0;
493 
494 	KASSERT(npf_config_locked_p(npf));
495 
496 	/*
497 	 * Scan the dynamic rules and share (migrate) if needed.
498 	 */
499 	LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
500 		npf_rule_t *active_rgroup;
501 
502 		/* Look for a dynamic ruleset group with the given name. */
503 		active_rgroup = npf_ruleset_lookup(oldset, rg->r_name);
504 		if (active_rgroup == NULL) {
505 			continue;
506 		}
507 
508 		/*
509 		 * ATOMICITY: Copy the head pointer of the linked-list,
510 		 * but do not remove the rules from the active r_subset.
511 		 * This is necessary because the rules are still active
512 		 * and therefore are accessible for inspection via the
513 		 * old ruleset.
514 		 */
515 		rg->r_subset = active_rgroup->r_subset;
516 
517 		/*
518 		 * We can safely migrate to the new all-rule list and
519 		 * reset the parent rule, though.
520 		 */
521 		for (rl = rg->r_subset; rl; rl = rl->r_next) {
522 			KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
523 			LIST_REMOVE(rl, r_aentry);
524 			LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);
525 
526 			KASSERT(rl->r_parent == active_rgroup);
527 			rl->r_parent = rg;
528 		}
529 	}
530 
531 	/*
532 	 * If we are loading the connections, then the NAT policies may
533 	 * already have translated connections associated with them, in
534 	 * which case we should not share or inherit anything.
535 	 */
536 	if (load)
537 		return;
538 
539 	/*
540 	 * Scan all rules in the new ruleset and share NAT policies.
541 	 * Also, assign a unique ID for each policy here.
542 	 */
543 	LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
544 		npf_natpolicy_t *np;
545 		npf_rule_t *actrl;
546 
547 		/* Does the rule have a NAT policy associated? */
548 		if ((np = rl->r_natp) == NULL) {
549 			continue;
550 		}
551 
552 		/*
553 		 * First, try to share the active port map.  If this
554 		 * policy will be unused, npf_nat_freepolicy() will
555 		 * drop the reference.
556 		 */
557 		npf_ruleset_sharepm(oldset, np);
558 
559 		/* Does it match with any policy in the active ruleset? */
560 		LIST_FOREACH(actrl, &oldset->rs_all, r_aentry) {
561 			if (!actrl->r_natp)
562 				continue;
563 			if ((actrl->r_attr & NPF_RULE_KEEPNAT) != 0)
564 				continue;
565 			if (npf_nat_cmppolicy(actrl->r_natp, np))
566 				break;
567 		}
568 		if (!actrl) {
569 			/* No: just set the ID and continue. */
570 			npf_nat_setid(np, ++nid);
571 			continue;
572 		}
573 
574 		/* Yes: inherit the matching NAT policy. */
575 		rl->r_natp = actrl->r_natp;
576 		npf_nat_setid(rl->r_natp, ++nid);
577 
578 		/*
579 		 * Finally, mark the active rule so that its NAT policy is
580 		 * not destroyed later, as we have inherited it (the rule
581 		 * itself must stay active for now).  Destroy the new, unused policy.
582 		 */
583 		actrl->r_attr |= NPF_RULE_KEEPNAT;
584 		npf_nat_freepolicy(np);
585 	}
586 
587 	/* Inherit the ID counter. */
588 	newset->rs_idcnt = oldset->rs_idcnt;
589 }
590 
591 /*
592  * npf_ruleset_sharepm: attempt to share the active NAT portmap.
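 *
 * => Returns the rule whose policy shared its portmap with the given
 *    policy, or NULL if no matching policy was found.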
593  */
594 npf_rule_t *
595 npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
596 {
597 	npf_natpolicy_t *np;
598 	npf_rule_t *rl;
599 
600 	/*
601 	 * Scan the NAT policies in the ruleset and match them against the
602 	 * given policy based on the translation IP address.  If they
603 	 * match, adjust the given NAT policy to use the active NAT
604 	 * portmap: the reference on the old portmap is dropped and a
605 	 * reference on the active one is acquired.
606 	 */
607 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
608 		np = rl->r_natp;
609 		if (np == NULL || np == mnp)
610 			continue;
611 		if (npf_nat_sharepm(np, mnp))
612 			break;
613 	}
614 	return rl;
615 }
616 
617 npf_natpolicy_t *
618 npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
619 {
620 	npf_rule_t *rl;
621 
622 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
623 		npf_natpolicy_t *np = rl->r_natp;
624 		if (np && npf_nat_getid(np) == id) {
625 			return np;
626 		}
627 	}
628 	return NULL;
629 }
630 
631 /*
632  * npf_ruleset_freealg: inspect the ruleset and disassociate the specified
633  * ALG from all NAT entries using it.
634  */
635 void
636 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
637 {
638 	npf_rule_t *rl;
639 	npf_natpolicy_t *np;
640 
641 	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
642 		if ((np = rl->r_natp) != NULL) {
643 			npf_nat_freealg(np, alg);
644 		}
645 	}
646 }
647 
648 /*
649  * npf_rule_alloc: allocate a rule and initialise it.
650  */
651 npf_rule_t *
652 npf_rule_alloc(npf_t *npf, prop_dictionary_t rldict)
653 {
654 	npf_rule_t *rl;
655 	const char *rname;
656 	prop_data_t d;
657 
658 	/* Allocate a rule structure. */
659 	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
660 	rl->r_natp = NULL;
661 
662 	/* Name (optional). */
663 	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
664 		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
665 	} else {
666 		rl->r_name[0] = '\0';
667 	}
668 
669 	/* Attributes, priority and interface ID (optional). */
670 	prop_dictionary_get_uint32(rldict, "attr", &rl->r_attr);
671 	rl->r_attr &= ~NPF_RULE_PRIVMASK;
672 
673 	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
674 		/* Priority of the dynamic rule. */
675 		prop_dictionary_get_int32(rldict, "prio", &rl->r_priority);
676 	} else {
677 		/* The skip-to index.  No need to validate it. */
678 		prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);
679 	}
680 
681 	/* Interface name; register and get the npf-if-id. */
682 	if (prop_dictionary_get_cstring_nocopy(rldict, "ifname", &rname)) {
683 		if ((rl->r_ifid = npf_ifmap_register(npf, rname)) == 0) {
684 			kmem_free(rl, sizeof(npf_rule_t));
685 			return NULL;
686 		}
687 	} else {
688 		rl->r_ifid = 0;
689 	}
690 
691 	/* Key (optional). */
692 	prop_object_t obj = prop_dictionary_get(rldict, "key");
693 	const void *key = prop_data_data_nocopy(obj);
694 
695 	if (key) {
696 		size_t len = prop_data_size(obj);
697 		if (len > NPF_RULE_MAXKEYLEN) {
698 			kmem_free(rl, sizeof(npf_rule_t));
699 			return NULL;
700 		}
701 		memcpy(rl->r_key, key, len);
702 	}
703 
704 	if ((d = prop_dictionary_get(rldict, "info")) != NULL) {
705 		rl->r_info = prop_data_copy(d);
706 	}
707 	return rl;
708 }
709 
710 static int
711 npf_rule_export(npf_t *npf, const npf_ruleset_t *rlset,
712     const npf_rule_t *rl, prop_dictionary_t rldict)
713 {
714 	u_int skip_to = 0;
715 	prop_data_t d;
716 
717 	prop_dictionary_set_uint32(rldict, "attr", rl->r_attr);
718 	prop_dictionary_set_int32(rldict, "prio", rl->r_priority);
719 	if ((rl->r_skip_to & SKIPTO_ADJ_FLAG) == 0) {
720 		skip_to = rl->r_skip_to & SKIPTO_MASK;
721 	}
722 	prop_dictionary_set_uint32(rldict, "skip-to", skip_to);
723 	prop_dictionary_set_int32(rldict, "code-type", rl->r_type);
724 	if (rl->r_code) {
725 		d = prop_data_create_data(rl->r_code, rl->r_clen);
726 		prop_dictionary_set_and_rel(rldict, "code", d);
727 	}
728 
729 	if (rl->r_ifid) {
730 		const char *ifname = npf_ifmap_getname(npf, rl->r_ifid);
731 		prop_dictionary_set_cstring(rldict, "ifname", ifname);
732 	}
733 	prop_dictionary_set_uint64(rldict, "id", rl->r_id);
734 
735 	if (rl->r_name[0]) {
736 		prop_dictionary_set_cstring(rldict, "name", rl->r_name);
737 	}
738 	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
739 		d = prop_data_create_data(rl->r_key, NPF_RULE_MAXKEYLEN);
740 		prop_dictionary_set_and_rel(rldict, "key", d);
741 	}
742 	if (rl->r_info) {
743 		prop_dictionary_set(rldict, "info", rl->r_info);
744 	}
745 
746 	npf_rproc_t *rp = npf_rule_getrproc(rl);
747 	if (rp != NULL) {
748 		prop_dictionary_set_cstring(rldict, "rproc",
749 		    npf_rproc_getname(rp));
750 		npf_rproc_release(rp);
751 	}
752 
753 	return 0;
754 }
755 
756 /*
757  * npf_rule_setcode: assign filter code to the rule.
758  *
759  * => The code must be validated by the caller.
760  * => JIT compilation may be performed here.
761  */
762 void
763 npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
764 {
765 	KASSERT(type == NPF_CODE_BPF);
766 
767 	rl->r_type = type;
768 	rl->r_code = code;
769 	rl->r_clen = size;
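
	/*
	 * Try to JIT-compile the byte-code.  If JIT compilation is not
	 * available, r_jcode remains NULL and npf_bpf_filter() falls back
	 * to the byte-code interpreter (see npf_bpf.c).
	 */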
770 	rl->r_jcode = npf_bpf_compile(code, size);
771 }
772 
773 /*
774  * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
775  */
776 void
777 npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
778 {
779 	npf_rproc_acquire(rp);
780 	rl->r_rproc = rp;
781 }
782 
783 /*
784  * npf_rule_free: free the specified rule.
785  */
786 void
787 npf_rule_free(npf_rule_t *rl)
788 {
789 	npf_natpolicy_t *np = rl->r_natp;
790 	npf_rproc_t *rp = rl->r_rproc;
791 
792 	if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
793 		/* Free NAT policy. */
794 		npf_nat_freepolicy(np);
795 	}
796 	if (rp) {
797 		/* Release rule procedure. */
798 		npf_rproc_release(rp);
799 	}
800 	if (rl->r_code) {
801 		/* Free byte-code. */
802 		kmem_free(rl->r_code, rl->r_clen);
803 	}
804 	if (rl->r_jcode) {
805 		/* Free JIT code. */
806 		bpf_jit_freecode(rl->r_jcode);
807 	}
808 	if (rl->r_info) {
809 		prop_object_release(rl->r_info);
810 	}
811 	kmem_free(rl, sizeof(npf_rule_t));
812 }
813 
814 /*
815  * npf_rule_getid: return the unique ID of a rule.
816  * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
817  * npf_rule_getnat: get NAT policy assigned to the rule.
818  */
819 
820 uint64_t
821 npf_rule_getid(const npf_rule_t *rl)
822 {
823 	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
824 	return rl->r_id;
825 }
826 
827 npf_rproc_t *
828 npf_rule_getrproc(const npf_rule_t *rl)
829 {
830 	npf_rproc_t *rp = rl->r_rproc;
831 
832 	if (rp) {
833 		npf_rproc_acquire(rp);
834 	}
835 	return rp;
836 }
837 
838 npf_natpolicy_t *
839 npf_rule_getnat(const npf_rule_t *rl)
840 {
841 	return rl->r_natp;
842 }
843 
844 /*
845  * npf_rule_setnat: assign the NAT policy to the rule.  The rule must
846  * not have a NAT policy associated yet.
847  */
848 void
849 npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
850 {
851 	KASSERT(rl->r_natp == NULL);
852 	rl->r_natp = np;
853 }
854 
855 /*
856  * npf_rule_inspect: match the interface, direction and run the filter code.
857  * Returns true if rule matches and false otherwise.
858  */
859 static inline bool
860 npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
861     const int di_mask, const u_int ifid)
862 {
863 	/* Match the interface. */
864 	if (rl->r_ifid && rl->r_ifid != ifid) {
865 		return false;
866 	}
867 
868 	/* Match the direction. */
869 	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
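	/* (Both bits set means the rule matches in either direction.) */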
870 		if ((rl->r_attr & di_mask) == 0)
871 			return false;
872 	}
873 
874 	/* Any code? */
875 	if (!rl->r_code) {
876 		KASSERT(rl->r_jcode == NULL);
877 		return true;
878 	}
879 	KASSERT(rl->r_type == NPF_CODE_BPF);
880 	return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
881 }
882 
883 /*
884  * npf_rule_reinspect: re-inspect the dynamic group by iterating its
885  * list of subrules.  Subrules cannot have nested rules.
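 * The subset is scanned in priority order; the last matching subrule is
 * returned, unless a matching subrule has NPF_RULE_FINAL set, in which
 * case it is returned immediately.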
886  */
887 static inline npf_rule_t *
888 npf_rule_reinspect(const npf_rule_t *rg, bpf_args_t *bc_args,
889     const int di_mask, const u_int ifid)
890 {
891 	npf_rule_t *final_rl = NULL, *rl;
892 
893 	KASSERT(NPF_DYNAMIC_GROUP_P(rg->r_attr));
894 
895 	for (rl = rg->r_subset; rl; rl = rl->r_next) {
896 		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
897 		if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
898 			continue;
899 		}
900 		if (rl->r_attr & NPF_RULE_FINAL) {
901 			return rl;
902 		}
903 		final_rl = rl;
904 	}
905 	return final_rl;
906 }
907 
908 /*
909  * npf_ruleset_inspect: inspect the packet against the given ruleset.
910  *
911  * Loop through the rules in the set and run the byte-code of each rule
912  * against the packet (nbuf chain).  If a sub-ruleset is found, inspect it.
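 * A rule which does not match skips forward to its skip-to index; a rule
 * with the NPF_RULE_FINAL attribute terminates the scan.  Returns the
 * matching rule, or NULL if nothing matched.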
913  */
914 npf_rule_t *
915 npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
916     const int di, const int layer)
917 {
918 	nbuf_t *nbuf = npc->npc_nbuf;
919 	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
920 	const u_int nitems = rlset->rs_nitems;
921 	const u_int ifid = nbuf->nb_ifid;
922 	npf_rule_t *final_rl = NULL;
923 	bpf_args_t bc_args;
924 	u_int n = 0;
925 
926 	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
927 
928 	/*
929 	 * Prepare the external memory store and the arguments for
930 	 * the BPF programs to be executed.  Reset mbuf before taking
931 	 * any pointers for the BPF.
932 	 */
933 	uint32_t bc_words[NPF_BPF_NWORDS];
934 
935 	nbuf_reset(nbuf);
936 	npf_bpf_prepare(npc, &bc_args, bc_words);
937 
938 	while (n < nitems) {
939 		npf_rule_t *rl = rlset->rs_rules[n];
940 		const u_int skip_to = rl->r_skip_to & SKIPTO_MASK;
941 		const uint32_t attr = rl->r_attr;
942 
943 		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
944 		KASSERT(n < skip_to);
945 
946 		/* Group is a barrier: return the matching rule, if any. */
947 		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
948 			break;
949 		}
950 
951 		/* Main inspection of the rule. */
952 		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
953 			n = skip_to;
954 			continue;
955 		}
956 
957 		if (NPF_DYNAMIC_GROUP_P(attr)) {
958 			/*
959 			 * If this is a dynamic group, re-inspect its subrules.
960 			 * If any of them matches, the result is final.
961 			 */
962 			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
963 			if (rl != NULL) {
964 				final_rl = rl;
965 				break;
966 			}
967 		} else if ((attr & NPF_RULE_GROUP) == 0) {
968 			/*
969 			 * Groups themselves never match; only plain rules do.
970 			 */
971 			final_rl = rl;
972 		}
973 
974 		/* Stop processing if the rule is marked as "final". */
975 		if (attr & NPF_RULE_FINAL) {
976 			break;
977 		}
978 		n++;
979 	}
980 
981 	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
982 	return final_rl;
983 }
984 
985 /*
986  * npf_rule_conclude: return decision and the flags for conclusion.
987  *
988  * => Returns ENETUNREACH if "block" and 0 if "pass".
989  */
990 int
991 npf_rule_conclude(const npf_rule_t *rl, npf_match_info_t *mi)
992 {
993 	/* If not passing - drop the packet. */
994 	mi->mi_retfl = rl->r_attr;
995 	mi->mi_rid = rl->r_id;
996 	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
997 }
998 
999 
1000 #if defined(DDB) || defined(_NPF_TESTING)
1001 
1002 void
1003 npf_ruleset_dump(npf_t *npf, const char *name)
1004 {
1005 	npf_ruleset_t *rlset = npf_config_ruleset(npf);
1006 	npf_rule_t *rg, *rl;
1007 
1008 	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
1009 		printf("ruleset '%s':\n", rg->r_name);
1010 		for (rl = rg->r_subset; rl; rl = rl->r_next) {
1011 			printf("\tid %"PRIu64", key: ", rl->r_id);
1012 			for (u_int i = 0; i < NPF_RULE_MAXKEYLEN; i++)
1013 				printf("%x", rl->r_key[i]);
1014 			printf("\n");
1015 		}
1016 	}
1017 }
1018 
1019 #endif
1020