/*	$NetBSD: npf_ruleset.c,v 1.20 2013/03/18 02:24:45 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF ruleset module.
 */

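/*
 * Overview (a summary of the structures below): a ruleset is an ordered
 * array of rules backed by three lists: all rules, the dynamic (named)
 * groups and a G/C list for deferred destruction.  Each dynamic group
 * keeps a subset queue of dynamic rules, and each dynamic rule points
 * back to its parent group.
 */
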
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.20 2013/03/18 02:24:45 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/queue.h>
#include <sys/mbuf.h>

#include <net/bpf.h>
#include <net/bpfjit.h>
#include <net/pfil.h>
#include <net/if.h>

#include "npf_ncode.h"
#include "npf_impl.h"

struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience.
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter. */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules. */
	npf_rule_t *		rs_rules[];
};

struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;
	u_int			r_skip_to;

	/* Code to process, if any. */
	int			r_type;
	bpfjit_function_t	r_jcode;
	void *			r_code;
	size_t			r_clen;
	/* NAT policy (optional) and rule procedure. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	/* Rule priority: (highest) 1, 2 ... n (lowest). */
	pri_t			r_priority;

	/*
	 * Dynamic group: subset queue and a dynamic group list entry.
	 * Dynamic rule: entry and the parent rule (the group).
	 */
	union {
		TAILQ_HEAD(npf_ruleq, npf_rule) r_subset;
		TAILQ_ENTRY(npf_rule)	r_entry;
	} /* C11 */;
	union {
		LIST_ENTRY(npf_rule)	r_dentry;
		npf_rule_t *		r_parent;
	} /* C11 */;

	/* Rule ID and the original dictionary. */
	uint64_t		r_id;
	prop_dictionary_t	r_dict;

	/* Rule name and all-list entry. */
	char			r_name[NPF_RULE_MAXNAMELEN];
	LIST_ENTRY(npf_rule)	r_aentry;

	/* Key (optional). */
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];
};

#define	NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

#define	NPF_DYNAMIC_RULE_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)

npf_ruleset_t *
npf_ruleset_create(size_t slots)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
	npf_ruleset_t *rlset;

	rlset = kmem_zalloc(len, KM_SLEEP);
	LIST_INIT(&rlset->rs_dynamic);
	LIST_INIT(&rlset->rs_all);
	LIST_INIT(&rlset->rs_gc);
	rlset->rs_slots = slots;

	return rlset;
}

static void
npf_ruleset_unlink(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_REMOVE(rl, r_dentry);
	}
	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		npf_rule_t *rg = rl->r_parent;
		TAILQ_REMOVE(&rg->r_subset, rl, r_entry);
	}
	LIST_REMOVE(rl, r_aentry);
}

void
npf_ruleset_destroy(npf_ruleset_t *rlset)
{
	size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
		npf_ruleset_unlink(rlset, rl);
		npf_rule_free(rl);
	}
	KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
	KASSERT(LIST_EMPTY(&rlset->rs_gc));
	kmem_free(rlset, len);
}

/*
 * npf_ruleset_insert: insert the rule into the specified ruleset.
 */
void
npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
{
	u_int n = rlset->rs_nitems;

	KASSERT(n < rlset->rs_slots);

	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
		LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
	}

	rlset->rs_rules[n] = rl;
	rlset->rs_nitems++;

	if (rl->r_skip_to < ++n) {
		rl->r_skip_to = n;
	}
}

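/*
 * Lifecycle sketch (illustrative only, not part of the module; the slot
 * count and the origin of "rldict" are assumptions): a ruleset is created
 * with a fixed number of slots, populated while being constructed, and
 * later destroyed as a whole together with its rules.
 *
 *	npf_ruleset_t *rs = npf_ruleset_create(8);
 *	npf_rule_t *rl = npf_rule_alloc(rldict);
 *	npf_ruleset_insert(rs, rl);
 *	...
 *	npf_ruleset_destroy(rs);
 */
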
static npf_rule_t *
npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
{
	npf_rule_t *rl;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
		KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
		if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
			break;
	}
	return rl;
}

int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it;
	pri_t priocmd;

	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}
	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	switch (priocmd) {
	case NPF_PRI_FIRST:
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority <= it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_HEAD(&rg->r_subset, rl, r_entry);
		}
		break;
	case NPF_PRI_LAST:
	default:
		TAILQ_FOREACH(it, &rg->r_subset, r_entry) {
			if (rl->r_priority < it->r_priority)
				break;
		}
		if (it) {
			TAILQ_INSERT_BEFORE(it, rl, r_entry);
		} else {
			TAILQ_INSERT_TAIL(&rg->r_subset, rl, r_entry);
		}
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}

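/*
 * Dynamic rule addition sketch ("my-group" is a hypothetical group name):
 * the target group must carry NPF_DYNAMIC_GROUP and the rule must carry
 * NPF_RULE_DYNAMIC, otherwise ESRCH/EINVAL is returned.  A negative
 * priority (NPF_PRI_FIRST or NPF_PRI_LAST) is an operation selecting
 * the insertion point and is reset to zero.
 *
 *	npf_rule_t *rl = npf_rule_alloc(rldict);
 *	int error = npf_ruleset_add(rlset, "my-group", rl);
 *	if (error) {
 *		npf_rule_free(rl);
 *	}
 */
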
int
npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}
	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		/* Compare ID.  On match, remove and return. */
		if (rl->r_id == id) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}

int
npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
    const void *key, size_t len)
{
	npf_rule_t *rg, *rl;

	KASSERT(len && len <= NPF_RULE_MAXKEYLEN);

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	/* Search in reverse order, i.e. find the last match first. */
	TAILQ_FOREACH_REVERSE(rl, &rg->r_subset, npf_ruleq, r_entry) {
		/* Compare the key.  On match, remove and return. */
		if (memcmp(rl->r_key, key, len) == 0) {
			npf_ruleset_unlink(rlset, rl);
			LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
			return 0;
		}
	}
	return ENOENT;
}

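/*
 * Removal sketch (the ID and key values are hypothetical): a dynamic rule
 * is removed either by the unique ID assigned in npf_ruleset_add() or by
 * its key.  Both routines only unlink the rule and move it to the G/C
 * list; npf_ruleset_gc() performs the actual destruction later.
 *
 *	error = npf_ruleset_remove(rlset, "my-group", id);
 *	error = npf_ruleset_remkey(rlset, "my-group", key, len);
 */
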
prop_dictionary_t
npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
{
	prop_dictionary_t rldict;
	prop_array_t rules;
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return NULL;
	}
	if ((rldict = prop_dictionary_create()) == NULL) {
		return NULL;
	}
	if ((rules = prop_array_create()) == NULL) {
		prop_object_release(rldict);
		return NULL;
	}

	TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
		if (rl->r_dict && !prop_array_add(rules, rl->r_dict)) {
			prop_object_release(rldict);
			prop_object_release(rules);
			return NULL;
		}
	}

	if (!prop_dictionary_set(rldict, "rules", rules)) {
		prop_object_release(rldict);
		rldict = NULL;
	}
	prop_object_release(rules);
	return rldict;
}

int
npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}
	while ((rl = TAILQ_FIRST(&rg->r_subset)) != NULL) {
		npf_ruleset_unlink(rlset, rl);
		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
	}
	return 0;
}

void
npf_ruleset_gc(npf_ruleset_t *rlset)
{
	npf_rule_t *rl;

	while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
		LIST_REMOVE(rl, r_aentry);
		npf_rule_free(rl);
	}
}

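/*
 * Deferred destruction sketch: flush moves the whole subset to the G/C
 * list and a separate G/C pass frees the rules.  The synchronisation
 * step in between is an assumption on the caller; the removed rules may
 * still be referenced by concurrent lookups until then.
 *
 *	npf_ruleset_flush(rlset, "my-group");
 *	...wait for the concurrent lookups to drain...
 *	npf_ruleset_gc(rlset);
 */
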
/*
 * npf_ruleset_reload: share the dynamic rules.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_reload(npf_ruleset_t *rlset, npf_ruleset_t *arlset)
{
	npf_rule_t *rg;

	KASSERT(npf_config_locked_p());

	LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
		npf_rule_t *arg, *rl;

		if ((arg = npf_ruleset_lookup(arlset, rg->r_name)) == NULL) {
			continue;
		}

		/*
		 * Copy the list-head structure and move the rules from the
		 * old ruleset to the new one by reinserting them into the
		 * new all-rules list and resetting the parent rule.  Note
		 * that the rules are still active and therefore accessible
		 * for inspection via the old ruleset.
		 */
		memcpy(&rg->r_subset, &arg->r_subset, sizeof(rg->r_subset));
		TAILQ_FOREACH(rl, &rg->r_subset, r_entry) {
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
			rl->r_parent = rg;
		}
	}

	/* Inherit the ID counter. */
	rlset->rs_idcnt = arlset->rs_idcnt;
}

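/*
 * Reload sketch (the caller-side sequence is an assumption; only the
 * exclusive lock requirement is stated above): the new ruleset takes
 * over the dynamic rules and the ID counter of the active one, after
 * which the two rulesets can be exchanged.
 *
 *	...acquire the config lock exclusively...
 *	npf_ruleset_reload(newset, activeset);
 *	...swap activeset and newset, drop the lock...
 */
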
/*
 * npf_ruleset_matchnat: find a matching NAT policy in the ruleset.
 */
npf_rule_t *
npf_ruleset_matchnat(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_rule_t *rl;

	/* Find a matching NAT policy in the old ruleset. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if (npf_nat_matchpolicy(rl->r_natp, mnp))
			break;
	}
	return rl;
}

npf_rule_t *
npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
{
	npf_natpolicy_t *np;
	npf_rule_t *rl;

	/* Scan the ruleset for a NAT policy which can share the portmap. */
	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		/*
		 * NAT policy might not yet be set during the creation of
		 * the ruleset (in such a case, the rule is for our policy),
		 * or the policies might be equal due to rule exchange on
		 * reload.
		 */
		np = rl->r_natp;
		if (np == NULL || np == mnp)
			continue;
		if (npf_nat_sharepm(np, mnp))
			break;
	}
	return rl;
}

/*
 * npf_ruleset_freealg: inspect the ruleset and disassociate the
 * specified ALG from all NAT entries using it.
 */
void
npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
{
	npf_rule_t *rl;
	npf_natpolicy_t *np;

	LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
		if ((np = rl->r_natp) != NULL) {
			npf_nat_freealg(np, alg);
		}
	}
}

/*
 * npf_ruleset_natreload: minimal reload of NAT policies by matching
 * the two (active and new) NAT rulesets.
 *
 * => Active ruleset should be exclusively locked.
 */
void
npf_ruleset_natreload(npf_ruleset_t *nrlset, npf_ruleset_t *arlset)
{
	npf_natpolicy_t *np, *anp;
	npf_rule_t *rl, *arl;

	/* Scan the new NAT ruleset against the NAT policies in the old one. */
	LIST_FOREACH(rl, &nrlset->rs_all, r_aentry) {
		np = rl->r_natp;
		arl = npf_ruleset_matchnat(arlset, np);
		if (arl == NULL) {
			continue;
		}
		/* On match - we exchange NAT policies. */
		anp = arl->r_natp;
		rl->r_natp = anp;
		arl->r_natp = np;
		/* Update other NAT policies to share the portmap. */
		(void)npf_ruleset_sharepm(nrlset, anp);
	}
}

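/*
 * Effect sketch (hypothetical rulesets "new" and "act"): after
 * npf_ruleset_natreload(new, act), a rule in "new" whose NAT policy
 * matches one in "act" ends up holding the active policy, preserving
 * existing translation state across the reload, while the displaced
 * policy migrates to the old rule and dies with the old ruleset.
 */
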
/*
 * npf_rule_alloc: allocate a rule and initialise it from the dictionary.
 */
npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)
{
	npf_rule_t *rl;
	const char *rname;

	/* Allocate a rule structure. */
	rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
	TAILQ_INIT(&rl->r_subset);
	rl->r_natp = NULL;

	/* Name (optional). */
	if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
		strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
	} else {
		rl->r_name[0] = '\0';
	}

	/* Attributes, priority and interface ID (optional). */
	prop_dictionary_get_uint32(rldict, "attributes", &rl->r_attr);
	prop_dictionary_get_int32(rldict, "priority", &rl->r_priority);
	prop_dictionary_get_uint32(rldict, "interface", &rl->r_ifid);

	/* Get the skip-to index.  No need to validate it. */
	prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);

	/* Key (optional). */
	prop_object_t obj = prop_dictionary_get(rldict, "key");
	const void *key = prop_data_data_nocopy(obj);

	if (key) {
		size_t len = prop_data_size(obj);
		if (len > NPF_RULE_MAXKEYLEN) {
			kmem_free(rl, sizeof(npf_rule_t));
			return NULL;
		}
		memcpy(rl->r_key, key, len);
	}

	if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		rl->r_dict = prop_dictionary_copy(rldict);
	}

	return rl;
}

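/*
 * Dictionary sketch: the keys consumed above are "name", "attributes",
 * "priority", "interface", "skip-to" and "key", all optional.  A minimal
 * kernel-side construction, assuming the standard proplib setters (the
 * attribute values are illustrative):
 *
 *	prop_dictionary_t rldict = prop_dictionary_create();
 *	prop_dictionary_set_cstring(rldict, "name", "my-rule");
 *	prop_dictionary_set_uint32(rldict, "attributes",
 *	    NPF_RULE_PASS | NPF_RULE_IN);
 *	rl = npf_rule_alloc(rldict);
 *	prop_object_release(rldict);
 */
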
/*
 * npf_rule_setcode: assign filter code to the rule.
 *
 * => The code must be validated by the caller.
 * => JIT compilation may be performed here.
 */
void
npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
{
	/* Perform BPF JIT if possible. */
	if (type == NPF_CODE_BPF && (membar_consumer(),
	    bpfjit_module_ops.bj_generate_code != NULL)) {
		KASSERT(rl->r_jcode == NULL);
		rl->r_jcode = bpfjit_module_ops.bj_generate_code(code, size);
	}
	rl->r_type = type;
	rl->r_code = code;
	rl->r_clen = size;
}

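/*
 * Code attachment sketch ("code" and "size" are illustrative; the
 * byte-code must already be validated, as required above): the rule
 * takes ownership of the buffer and npf_rule_free() will release it
 * with kmem_free(code, size).
 *
 *	void *code = kmem_alloc(size, KM_SLEEP);
 *	...copy in and validate the BPF byte-code...
 *	npf_rule_setcode(rl, NPF_CODE_BPF, code, size);
 */
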
/*
 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
 */
void
npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
{
	npf_rproc_acquire(rp);
	rl->r_rproc = rp;
}

/*
 * npf_rule_free: free the specified rule.
 */
void
npf_rule_free(npf_rule_t *rl)
{
	npf_natpolicy_t *np = rl->r_natp;
	npf_rproc_t *rp = rl->r_rproc;

	if (np) {
		/* Free NAT policy. */
		npf_nat_freepolicy(np);
	}
	if (rp) {
		/* Release rule procedure. */
		npf_rproc_release(rp);
	}
	if (rl->r_code) {
		/* Free byte-code. */
		kmem_free(rl->r_code, rl->r_clen);
	}
	if (rl->r_jcode) {
		/* Free JIT code. */
		KASSERT(bpfjit_module_ops.bj_free_code != NULL);
		bpfjit_module_ops.bj_free_code(rl->r_jcode);
	}
	if (rl->r_dict) {
		/* Destroy the dictionary. */
		prop_object_release(rl->r_dict);
	}
	kmem_free(rl, sizeof(npf_rule_t));
}

/*
 * npf_rule_getid: return the unique ID of a rule.
 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
 * npf_rule_getnat: get NAT policy assigned to the rule.
 */

uint64_t
npf_rule_getid(const npf_rule_t *rl)
{
	KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
	return rl->r_id;
}

npf_rproc_t *
npf_rule_getrproc(npf_rule_t *rl)
{
	npf_rproc_t *rp = rl->r_rproc;

	if (rp) {
		npf_rproc_acquire(rp);
	}
	return rp;
}

npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t *rl)
{
	return rl->r_natp;
}

/*
 * npf_rule_setnat: assign a NAT policy to the rule.
 */
void
npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
{

	KASSERT(rl->r_natp == NULL);
	rl->r_natp = np;
}

/*
 * npf_rule_inspect: match the interface, direction and run the filter code.
 * Returns true if the rule matches, false otherwise.
 */
static inline bool
npf_rule_inspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *rl,
    const int di_mask, const int layer)
{
	const ifnet_t *ifp = nbuf->nb_ifp;
	const void *code;

	/* Match the interface. */
	if (rl->r_ifid && rl->r_ifid != ifp->if_index) {
		return false;
	}

	/* Match the direction. */
	if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
		if ((rl->r_attr & di_mask) == 0)
			return false;
	}

	/* Execute JIT code, if any. */
	if (__predict_true(rl->r_jcode)) {
		struct mbuf *m = nbuf_head_mbuf(nbuf);
		size_t pktlen = m_length(m);

		return rl->r_jcode((unsigned char *)m, pktlen, 0) != 0;
	}

	/* Execute the byte-code, if any. */
	if ((code = rl->r_code) == NULL) {
		return true;
	}

	switch (rl->r_type) {
	case NPF_CODE_NC:
		return npf_ncode_process(npc, code, nbuf, layer) == 0;
	case NPF_CODE_BPF: {
		struct mbuf *m = nbuf_head_mbuf(nbuf);
		size_t pktlen = m_length(m);
		return bpf_filter(code, (unsigned char *)m, pktlen, 0) != 0;
	}
	default:
		KASSERT(false);
	}
	return false;
}

/*
 * npf_rule_reinspect: re-inspect the dynamic group by iterating its
 * subset of rules.  This is only for dynamic groups; subrules cannot
 * have nested rules.
 */
static npf_rule_t *
npf_rule_reinspect(npf_cache_t *npc, nbuf_t *nbuf, const npf_rule_t *drl,
    const int di_mask, const int layer)
{
	npf_rule_t *final_rl = NULL, *rl;

	KASSERT(NPF_DYNAMIC_GROUP_P(drl->r_attr));

	TAILQ_FOREACH(rl, &drl->r_subset, r_entry) {
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			continue;
		}
		if (rl->r_attr & NPF_RULE_FINAL) {
			return rl;
		}
		final_rl = rl;
	}
	return final_rl;
}

/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the filter code of each rule
 * against the packet (nbuf chain).  If a sub-ruleset is found, inspect it.
 *
 * => Caller is responsible for nbuf chain protection.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
    const npf_ruleset_t *rlset, const int di, const int layer)
{
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	npf_rule_t *final_rl = NULL;
	u_int n = 0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		KASSERT(n < skip_to);

		/* A group is a barrier: return the matching rule, if any. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/* Main inspection of the rule. */
		if (!npf_rule_inspect(npc, nbuf, rl, di_mask, layer)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic group, re-inspect the
			 * subrules.  If any of them match, it is final.
			 */
			rl = npf_rule_reinspect(npc, nbuf, rl, di_mask, layer);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves do not match.
			 */
			final_rl = rl;
		}

		/* Check if the matched rule is final. */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}

/*
 * npf_rule_conclude: return decision and the flags for conclusion.
 *
 * => Returns ENETUNREACH if "block" and 0 if "pass".
 */
int
npf_rule_conclude(const npf_rule_t *rl, int *retfl)
{
	/* If not passing - drop the packet. */
	*retfl = rl->r_attr;
	return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
}

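/*
 * Inspection sketch (caller-side; the npc/nbuf setup and the layer
 * constant are assumptions based on the signatures above):
 *
 *	rl = npf_ruleset_inspect(npc, nbuf, rlset, PFIL_IN, NPF_LAYER_3);
 *	if (rl) {
 *		error = npf_rule_conclude(rl, &retfl);
 *	}
 *
 * where error is 0 for "pass" and ENETUNREACH for "block".
 */
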
#if defined(DDB) || defined(_NPF_TESTING)

void
npf_rulenc_dump(const npf_rule_t *rl)
{
	const uint32_t *op = rl->r_code;
	size_t n = rl->r_clen;

	while (n) {
		printf("\t> |0x%02x|\n", (uint32_t)*op);
		op++;
		n -= sizeof(*op);
	}
	printf("-> %s\n", (rl->r_attr & NPF_RULE_PASS) ? "pass" : "block");
}

#endif