/*	$OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $	*/

/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/pf/pfvar.h>

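/*
 * ioctl helpers: ACCEPT_FLAGS() rejects any flag outside the caller's
 * allowed set, while COPYIN()/COPYOUT() use copyin(9)/copyout(9) for
 * userland ioctl requests (PFR_FLAG_USERIOCTL) and fall back to a
 * plain bcopy() for in-kernel callers.
 */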
#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

static MALLOC_DEFINE(M_PFRKTABLEPL, "pfrktable", "pf radix table pool list");
static MALLOC_DEFINE(M_PFRKENTRYPL, "pfrkentry", "pf radix entry pool list");
static MALLOC_DEFINE(M_PFRKENTRYPL2, "pfrkentry2", "pf radix entry 2 pool list");
static MALLOC_DEFINE(M_PFRKCOUNTERSPL, "pfrkcounters", "pf radix counters");

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)
struct malloc_type	*pfr_ktable_pl;
struct malloc_type	*pfr_kentry_pl;
struct malloc_type	*pfr_kentry_pl2;
static struct pf_addr	 pfr_ffaddr;		/* constant after setup */

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

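/*
 * Flush all addresses from the given table.  The entries are collected
 * on a work queue by walking both radix trees, then removed in one
 * pass (unless PFR_FLAG_DUMMY requested a dry run).
 */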
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		if (kt->pfrkt_cnt) {
			kprintf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

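/*
 * Add "size" addresses to the table.  A scratch table (tmpkt) catches
 * duplicates within the request itself; with PFR_FLAG_FEEDBACK each
 * pfr_addr is copied back to the caller with a PFR_FB_* code telling
 * what happened to it.
 */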
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

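/*
 * Delete the given addresses from the table.  Candidates are flagged
 * via pfrke_mark first, which also makes duplicates within the request
 * detectable, and are then removed in a single pass.
 */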
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here,
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
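	/*
	 * Illustrative numbers (not from the source): for a table of
	 * N = 1024 entries the loop below leaves log = 12, so the O(N)
	 * full-table scan is chosen once more than 1024/12 = 85
	 * addresses are to be deleted; smaller requests take the
	 * per-address lookup path.
	 */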
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

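/*
 * Replace the table contents with the given address list: unlisted
 * entries are queued for deletion, unknown ones for insertion, and
 * entries whose negation flag differs go onto changeq.  With
 * PFR_FLAG_FEEDBACK and a non-zero *size2, the deleted addresses are
 * reported back in the array slots past the caller's input.
 */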
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

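/*
 * Test the given addresses against the table without modifying it.
 * *nmatch counts positive (non-negated) matches; per-address results
 * come back through pfra_fback.
 */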
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

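/*
 * Copy the table's addresses out to the caller.  Like the other "get"
 * ioctls, when the supplied buffer is too small only the required
 * size is returned in *size; pfr_walktree() does the actual copying.
 */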
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		kprintf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		kprintf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

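/*
 * Sanity-check a pfr_addr from userland: the prefix length must fit
 * the address family and all host bits beyond the prefix must be
 * zero.  Note that the byte indexing below assumes the address union
 * is the first member of struct pfr_addr.
 */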
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = NULL;
	struct pfr_kentry	*ke;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
		    head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL2,
		    M_NOWAIT | M_ZERO);
	else
		ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL,
		    M_NOWAIT | M_ZERO | M_NULLOK);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		kfree(ke->pfrke_counters, M_PFRKCOUNTERSPL);
	if (ke->pfrke_intrpool)
		kfree(ke, M_PFRKENTRYPL2);
	else
		kfree(ke, M_PFRKENTRYPL);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			kprintf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		crit_enter();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			kfree(p->pfrke_counters, M_PFRKCOUNTERSPL);
			p->pfrke_counters = NULL;
		}
		crit_exit();
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}

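/*
 * Build a netmask sockaddr for the given prefix length; e.g. net = 9
 * yields 0xff800000 (255.128.0.0) for AF_INET.  IPv6 masks are filled
 * 32 bits at a time.
 */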
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

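/*
 * Link a kentry into (or unlink it from) the table's radix tree for
 * its address family; network entries carry an explicit mask, host
 * entries none.
 */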
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
		    ke->pfrke_node);
	} else
		rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
		    ke->pfrke_node);
	crit_exit();

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
	} else
		rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
	crit_exit();

	if (rn == NULL) {
		kprintf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

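/*
 * Callback invoked for every radix node during a tree walk; pfrw_op
 * selects what to do with each entry (clear or test marks, enqueue,
 * copy out, pick a pool address, or refresh a dynamic address).
 */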
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	union sockaddr_union	pfr_mask;
	int			flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			crit_enter();
			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			crit_exit();
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

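/*
 * Table-level operations follow.  Tables live in the pfr_ktables
 * red-black tree; "deleting" one merely clears PFR_TFLAG_ACTIVE in
 * pfrkt_nflags and lets pfr_setflags_ktable() reap it when no
 * references remain.
 */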
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		kprintf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_enter();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			crit_exit();
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_exit();
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (n) {
		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

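/*
 * The pfr_ina_* functions implement two-phase table loading for
 * ruleset replacement: pfr_ina_begin() issues a ticket,
 * pfr_ina_define() builds shadow tables under that ticket, and
 * pfr_ina_commit() (or pfr_ina_rollback()) swaps them in or throws
 * them away.
 */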
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

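/*
 * Fold a committed shadow table into its live counterpart.  If the
 * live table currently holds addresses the two contents are merged
 * entry by entry; otherwise the radix heads are simply swapped, which
 * is the cheap case.
 */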
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	crit_enter();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	crit_exit();
	kt->pfrkt_tzero = tzero;
}

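/*
 * Allocate a ktable and attach its two radix trees (one per address
 * family).  With attachruleset set, the table is also accounted
 * against its pf_ruleset so that empty anchors can be reclaimed.
 */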
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = kmalloc(sizeof(struct pfr_ktable), M_PFRKTABLEPL,
	    M_NOWAIT|M_ZERO|M_NULLOK);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	KKASSERT(pf_maskhead != NULL);
	if (!rn_inithead((void **)&kt->pfrkt_ip4, pf_maskhead,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6, pf_maskhead,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

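/* Destroy every table on the workq. */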
void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

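/*
 * Tear a table down: optionally flush its addresses, free both
 * radix heads, recurse into an attached shadow table and drop the
 * ruleset accounting before freeing the table itself.
 */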
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	kfree(kt, M_PFRKTABLEPL);
}

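/* RB tree comparator: order tables by name, then by anchor path. */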
int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

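/* Find a table by name and anchor in the global tree. */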
struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* a struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

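/*
 * Data-path lookup: match an address against a table and bump the
 * table's match/nomatch counters.  Returns 1 on a match against a
 * non-negated entry.  An inactive table defers to its root table.
 */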
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;
	struct sockaddr_in	 pfr_sin;
#ifdef INET6
	struct sockaddr_in6	 pfr_sin6;
#endif

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		bzero(&pfr_sin, sizeof(pfr_sin));
		pfr_sin.sin_len = sizeof(pfr_sin);
		pfr_sin.sin_family = AF_INET;
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bzero(&pfr_sin6, sizeof(pfr_sin6));
		pfr_sin6.sin6_len = sizeof(pfr_sin6);
		pfr_sin6.sin6_family = AF_INET6;
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

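/*
 * Data-path accounting: charge a packet to the table's
 * direction/operation counters and, if PFR_TFLAG_COUNTERS is set,
 * to the matching entry (allocating its counters on first use).
 * A lookup result that contradicts the rule's earlier match is
 * recorded under PFR_OP_XPASS instead.
 */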
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;
	struct sockaddr_in	 pfr_sin;
#ifdef INET6
	struct sockaddr_in6	 pfr_sin6;
#endif

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		bzero(&pfr_sin, sizeof(pfr_sin));
		pfr_sin.sin_len = sizeof(pfr_sin);
		pfr_sin.sin_family = AF_INET;
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bzero(&pfr_sin6, sizeof(pfr_sin6));
		pfr_sin6.sin6_len = sizeof(pfr_sin6);
		pfr_sin6.sin6_family = AF_INET6;
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			kprintf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = kmalloc(
			    sizeof(struct pfr_kcounters),
			    M_PFRKCOUNTERSPL, M_NOWAIT|M_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] +=
			    len;
		}
	}
}

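/*
 * Take a rule reference on a table, creating the table on first
 * use.  Inside an anchor, an (empty) root table of the same name
 * is created in the main ruleset and linked as pfrkt_root.  The
 * first rule reference marks the table PFR_TFLAG_REFERENCED.
 */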
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

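/*
 * Drop a rule reference on a table; the last reference clears
 * PFR_TFLAG_REFERENCED, which may deactivate the table.
 */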
void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		kprintf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

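/*
 * Round-robin address selection for pools backed by a table:
 * starting from block *pidx (and, if given, the previous address
 * in *counter), pick the next usable address while stepping over
 * nested, more specific blocks.  Returns 0 with the address in
 * *counter and the block index in *pidx, 1 when the table is
 * exhausted, -1 if the table is not active.
 */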
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2 = NULL;
	struct pf_addr		*addr = NULL;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;
	struct sockaddr_in	 pfr_sin;
	struct sockaddr_in6	 pfr_sin6;
	union sockaddr_union	 pfr_mask;

	/*
	 * The sockaddrs are used as rn_match() keys below; give them
	 * a valid length and family up front, as pfr_match_addr() does.
	 */
	if (af == AF_INET) {
		bzero(&pfr_sin, sizeof(pfr_sin));
		pfr_sin.sin_len = sizeof(pfr_sin);
		pfr_sin.sin_family = AF_INET;
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	} else if (af == AF_INET6) {
		bzero(&pfr_sin6, sizeof(pfr_sin6));
		pfr_sin6.sin6_len = sizeof(pfr_sin6);
		pfr_sin6.sin6_family = AF_INET6;
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/*
		 * We need to increase the counter past the nested block;
		 * prepare the mask with the lookup af (not always AF_INET)
		 * so SUNION2PF() below extracts the right union member.
		 */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

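/*
 * Return the idx-th entry of the given address family via a
 * PFRW_POOL_GET tree walk, or NULL if the index is out of range.
 */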
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

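/*
 * Re-evaluate a dynamic address that is backed by this table:
 * reset the IPv4/IPv6 address counts and re-walk both radix heads
 * so the walker can update *dyn.
 */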
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	crit_enter();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	crit_exit();
}