1 /*	$OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $	*/
2 
3 /*
4  * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
5  *
6  * Copyright (c) 2002 Cedric Berger
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34 
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/socket.h>
41 #include <sys/mbuf.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/thread2.h>
45 
46 #include <net/if.h>
47 #include <net/route.h>
48 #include <netinet/in.h>
49 #include <net/pf/pfvar.h>
50 
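/*
 * Reject a request outright if it carries any flag that is not in the
 * caller-supplied list of acceptable flags.
 */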
51 #define ACCEPT_FLAGS(flags, oklist)		\
52 	do {					\
53 		if ((flags & ~(oklist)) &	\
54 		    PFR_FLAG_ALLMASK)		\
55 			return (EINVAL);	\
56 	} while (0)
57 
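/*
 * COPYIN/COPYOUT move records either across the user/kernel boundary
 * (for PFR_FLAG_USERIOCTL requests) or between two kernel buffers with
 * a plain bcopy().
 */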
58 #define COPYIN(from, to, size, flags)		\
59 	((flags & PFR_FLAG_USERIOCTL) ?		\
60 	copyin((from), (to), (size)) :		\
61 	(bcopy((from), (to), (size)), 0))
62 
63 #define COPYOUT(from, to, size, flags)		\
64 	((flags & PFR_FLAG_USERIOCTL) ?		\
65 	copyout((from), (to), (size)) :		\
66 	(bcopy((from), (to), (size)), 0))
67 
68 #define	FILLIN_SIN(sin, addr)			\
69 	do {					\
70 		(sin).sin_len = sizeof(sin);	\
71 		(sin).sin_family = AF_INET;	\
72 		(sin).sin_addr = (addr);	\
73 	} while (0)
74 
75 #define	FILLIN_SIN6(sin6, addr)			\
76 	do {					\
77 		(sin6).sin6_len = sizeof(sin6);	\
78 		(sin6).sin6_family = AF_INET6;	\
79 		(sin6).sin6_addr = (addr);	\
80 	} while (0)
81 
82 #define SWAP(type, a1, a2)			\
83 	do {					\
84 		type tmp = a1;			\
85 		a1 = a2;			\
86 		a2 = tmp;			\
87 	} while (0)
88 
89 #define SUNION2PF(su, af) (((af)==AF_INET) ?	\
90     (struct pf_addr *)&(su)->sin.sin_addr :	\
91     (struct pf_addr *)&(su)->sin6.sin6_addr)
92 
93 #define	AF_BITS(af)		(((af)==AF_INET)?32:128)
94 #define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
95 #define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
96 #define KENTRY_RNF_ROOT(ke) \
97 		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
98 
99 #define NO_ADDRESSES		(-1)
100 #define ENQUEUE_UNMARKED_ONLY	(1)
101 #define INVERT_NEG_FLAG		(1)
102 
103 static MALLOC_DEFINE(M_PFRKTABLEPL, "pfrktable", "pf radix table pool list");
104 static MALLOC_DEFINE(M_PFRKENTRYPL, "pfrkentry", "pf radix entry pool list");
105 static MALLOC_DEFINE(M_PFRKENTRYPL2, "pfrkentry2", "pf radix entry 2 pool list");
106 static MALLOC_DEFINE(M_PFRKCOUNTERSPL, "pfrkcounters", "pf radix counters");
107 
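/*
 * State passed to pfr_walktree() while walking a table's radix trees;
 * pfrw_op selects the operation and pfrw_1 carries the matching argument.
 */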
108 struct pfr_walktree {
109 	enum pfrw_op {
110 		PFRW_MARK,
111 		PFRW_SWEEP,
112 		PFRW_ENQUEUE,
113 		PFRW_GET_ADDRS,
114 		PFRW_GET_ASTATS,
115 		PFRW_POOL_GET,
116 		PFRW_DYNADDR_UPDATE
117 	}	 pfrw_op;
118 	union {
119 		struct pfr_addr		*pfrw1_addr;
120 		struct pfr_astats	*pfrw1_astats;
121 		struct pfr_kentryworkq	*pfrw1_workq;
122 		struct pfr_kentry	*pfrw1_kentry;
123 		struct pfi_dynaddr	*pfrw1_dyn;
124 	}	 pfrw_1;
125 	int	 pfrw_free;
126 	int	 pfrw_flags;
127 };
128 #define pfrw_addr	pfrw_1.pfrw1_addr
129 #define pfrw_astats	pfrw_1.pfrw1_astats
130 #define pfrw_workq	pfrw_1.pfrw1_workq
131 #define pfrw_kentry	pfrw_1.pfrw1_kentry
132 #define pfrw_dyn	pfrw_1.pfrw1_dyn
133 #define pfrw_cnt	pfrw_free
134 
135 #define senderr(e)	do { rv = (e); goto _bad; } while (0)
136 struct malloc_type	*pfr_ktable_pl;
137 struct malloc_type	*pfr_kentry_pl;
138 struct malloc_type	*pfr_kentry_pl2;
139 static struct pf_addr	 pfr_ffaddr;		/* constant after setup */
140 
141 void			 pfr_copyout_addr(struct pfr_addr *,
142 			    struct pfr_kentry *ke);
143 int			 pfr_validate_addr(struct pfr_addr *);
144 void			 pfr_enqueue_addrs(struct pfr_ktable *,
145 			    struct pfr_kentryworkq *, int *, int);
146 void			 pfr_mark_addrs(struct pfr_ktable *);
147 struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
148 			    struct pfr_addr *, int);
149 struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
150 void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
151 void			 pfr_destroy_kentry(struct pfr_kentry *);
152 void			 pfr_insert_kentries(struct pfr_ktable *,
153 			    struct pfr_kentryworkq *, long);
154 void			 pfr_remove_kentries(struct pfr_ktable *,
155 			    struct pfr_kentryworkq *);
156 void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
157 			    int);
158 void			 pfr_reset_feedback(struct pfr_addr *, int, int);
159 void			 pfr_prepare_network(union sockaddr_union *, int, int);
160 int			 pfr_route_kentry(struct pfr_ktable *,
161 			    struct pfr_kentry *);
162 int			 pfr_unroute_kentry(struct pfr_ktable *,
163 			    struct pfr_kentry *);
164 int			 pfr_walktree(struct radix_node *, void *);
165 int			 pfr_validate_table(struct pfr_table *, int, int);
166 int			 pfr_fix_anchor(char *);
167 void			 pfr_commit_ktable(struct pfr_ktable *, long);
168 void			 pfr_insert_ktables(struct pfr_ktableworkq *);
169 void			 pfr_insert_ktable(struct pfr_ktable *);
170 void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
171 void			 pfr_setflags_ktable(struct pfr_ktable *, int);
172 void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
173 			    int);
174 void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
175 struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
176 void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
177 void			 pfr_destroy_ktable(struct pfr_ktable *, int);
178 int			 pfr_ktable_compare(struct pfr_ktable *,
179 			    struct pfr_ktable *);
180 struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
181 void			 pfr_clean_node_mask(struct pfr_ktable *,
182 			    struct pfr_kentryworkq *);
183 int			 pfr_table_count(struct pfr_table *, int);
184 int			 pfr_skip_table(struct pfr_table *,
185 			    struct pfr_ktable *, int);
186 struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);
187 
188 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
189 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
190 
191 struct pfr_ktablehead	 pfr_ktables;
192 struct pfr_table	 pfr_nulltable;
193 int			 pfr_ktable_cnt;
194 
195 void
196 pfr_initialize(void)
197 {
198 	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
199 }
200 
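/*
 * Flush every address from a table.  *ndel returns the number of entries
 * found; with PFR_FLAG_DUMMY the table is only counted, not modified.
 */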
201 int
202 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
203 {
204 	struct pfr_ktable	*kt;
205 	struct pfr_kentryworkq	 workq;
206 
207 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
208 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
209 		return (EINVAL);
210 	kt = pfr_lookup_table(tbl);
211 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
212 		return (ESRCH);
213 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
214 		return (EPERM);
215 	pfr_enqueue_addrs(kt, &workq, ndel, 0);
216 
217 	if (!(flags & PFR_FLAG_DUMMY)) {
218 		if (flags & PFR_FLAG_ATOMIC)
219 			crit_enter();
220 		pfr_remove_kentries(kt, &workq);
221 		if (flags & PFR_FLAG_ATOMIC)
222 			crit_exit();
223 		if (kt->pfrkt_cnt) {
224 			kprintf("pfr_clr_addrs: corruption detected (%d).\n",
225 			    kt->pfrkt_cnt);
226 			kt->pfrkt_cnt = 0;
227 		}
228 	}
229 	return (0);
230 }
231 
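/*
 * Add 'size' addresses from the supplied array to a table.  Duplicate and
 * conflicting entries are skipped; with PFR_FLAG_FEEDBACK the per-address
 * result codes are copied back to the caller.
 */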
232 int
233 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
234     int *nadd, int flags)
235 {
236 	struct pfr_ktable	*kt, *tmpkt;
237 	struct pfr_kentryworkq	 workq;
238 	struct pfr_kentry	*p, *q;
239 	struct pfr_addr		 ad;
240 	int			 i, rv, xadd = 0;
241 	long			 tzero = time_second;
242 
243 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
244 	    PFR_FLAG_FEEDBACK);
245 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
246 		return (EINVAL);
247 	kt = pfr_lookup_table(tbl);
248 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
249 		return (ESRCH);
250 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
251 		return (EPERM);
252 	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
253 	if (tmpkt == NULL)
254 		return (ENOMEM);
255 	SLIST_INIT(&workq);
256 	for (i = 0; i < size; i++) {
257 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
258 			senderr(EFAULT);
259 		if (pfr_validate_addr(&ad))
260 			senderr(EINVAL);
261 		p = pfr_lookup_addr(kt, &ad, 1);
262 		q = pfr_lookup_addr(tmpkt, &ad, 1);
263 		if (flags & PFR_FLAG_FEEDBACK) {
264 			if (q != NULL)
265 				ad.pfra_fback = PFR_FB_DUPLICATE;
266 			else if (p == NULL)
267 				ad.pfra_fback = PFR_FB_ADDED;
268 			else if (p->pfrke_not != ad.pfra_not)
269 				ad.pfra_fback = PFR_FB_CONFLICT;
270 			else
271 				ad.pfra_fback = PFR_FB_NONE;
272 		}
273 		if (p == NULL && q == NULL) {
274 			p = pfr_create_kentry(&ad,
275 			    !(flags & PFR_FLAG_USERIOCTL));
276 			if (p == NULL)
277 				senderr(ENOMEM);
278 			if (pfr_route_kentry(tmpkt, p)) {
279 				pfr_destroy_kentry(p);
280 				ad.pfra_fback = PFR_FB_NONE;
281 			} else {
282 				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
283 				xadd++;
284 			}
285 		}
286 		if (flags & PFR_FLAG_FEEDBACK)
287 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
288 				senderr(EFAULT);
289 	}
290 	pfr_clean_node_mask(tmpkt, &workq);
291 	if (!(flags & PFR_FLAG_DUMMY)) {
292 		if (flags & PFR_FLAG_ATOMIC)
293 			crit_enter();
294 		pfr_insert_kentries(kt, &workq, tzero);
295 		if (flags & PFR_FLAG_ATOMIC)
296 			crit_exit();
297 	} else
298 		pfr_destroy_kentries(&workq);
299 	if (nadd != NULL)
300 		*nadd = xadd;
301 	pfr_destroy_ktable(tmpkt, 0);
302 	return (0);
303 _bad:
304 	pfr_clean_node_mask(tmpkt, &workq);
305 	pfr_destroy_kentries(&workq);
306 	if (flags & PFR_FLAG_FEEDBACK)
307 		pfr_reset_feedback(addr, size, flags);
308 	pfr_destroy_ktable(tmpkt, 0);
309 	return (rv);
310 }
311 
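/*
 * Delete the given addresses from a table.  Depending on which is cheaper,
 * the entry marks are reset either by a full-table walk or by looking up
 * each address to be deleted (see the comment below).
 */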
312 int
313 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
314     int *ndel, int flags)
315 {
316 	struct pfr_ktable	*kt;
317 	struct pfr_kentryworkq	 workq;
318 	struct pfr_kentry	*p;
319 	struct pfr_addr		 ad;
320 	int			 i, rv, xdel = 0, log = 1;
321 
322 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
323 	    PFR_FLAG_FEEDBACK);
324 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
325 		return (EINVAL);
326 	kt = pfr_lookup_table(tbl);
327 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
328 		return (ESRCH);
329 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
330 		return (EPERM);
331 	/*
332 	 * There are two algorithms to choose from here.
333 	 * With:
334 	 *   n: number of addresses to delete
335 	 *   N: number of addresses in the table
336 	 *
337 	 * one is O(N) and is better for large 'n',
338 	 * one is O(n*LOG(N)) and is better for small 'n'.
339 	 *
340 	 * The following code tries to decide which one is best.
341 	 */
342 	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
343 		log++;
344 	if (size > kt->pfrkt_cnt/log) {
345 		/* full table scan */
346 		pfr_mark_addrs(kt);
347 	} else {
348 		/* iterate over addresses to delete */
349 		for (i = 0; i < size; i++) {
350 			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
351 				return (EFAULT);
352 			if (pfr_validate_addr(&ad))
353 				return (EINVAL);
354 			p = pfr_lookup_addr(kt, &ad, 1);
355 			if (p != NULL)
356 				p->pfrke_mark = 0;
357 		}
358 	}
359 	SLIST_INIT(&workq);
360 	for (i = 0; i < size; i++) {
361 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
362 			senderr(EFAULT);
363 		if (pfr_validate_addr(&ad))
364 			senderr(EINVAL);
365 		p = pfr_lookup_addr(kt, &ad, 1);
366 		if (flags & PFR_FLAG_FEEDBACK) {
367 			if (p == NULL)
368 				ad.pfra_fback = PFR_FB_NONE;
369 			else if (p->pfrke_not != ad.pfra_not)
370 				ad.pfra_fback = PFR_FB_CONFLICT;
371 			else if (p->pfrke_mark)
372 				ad.pfra_fback = PFR_FB_DUPLICATE;
373 			else
374 				ad.pfra_fback = PFR_FB_DELETED;
375 		}
376 		if (p != NULL && p->pfrke_not == ad.pfra_not &&
377 		    !p->pfrke_mark) {
378 			p->pfrke_mark = 1;
379 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
380 			xdel++;
381 		}
382 		if (flags & PFR_FLAG_FEEDBACK)
383 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
384 				senderr(EFAULT);
385 	}
386 	if (!(flags & PFR_FLAG_DUMMY)) {
387 		if (flags & PFR_FLAG_ATOMIC)
388 			crit_enter();
389 		pfr_remove_kentries(kt, &workq);
390 		if (flags & PFR_FLAG_ATOMIC)
391 			crit_exit();
392 	}
393 	if (ndel != NULL)
394 		*ndel = xdel;
395 	return (0);
396 _bad:
397 	if (flags & PFR_FLAG_FEEDBACK)
398 		pfr_reset_feedback(addr, size, flags);
399 	return (rv);
400 }
401 
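/*
 * Replace the contents of a table with the given address list: unknown
 * addresses are added, addresses missing from the list are deleted and
 * entries whose negation flag differs are toggled.
 */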
402 int
403 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
404     int *size2, int *nadd, int *ndel, int *nchange, int flags,
405     u_int32_t ignore_pfrt_flags)
406 {
407 	struct pfr_ktable	*kt, *tmpkt;
408 	struct pfr_kentryworkq	 addq, delq, changeq;
409 	struct pfr_kentry	*p, *q;
410 	struct pfr_addr		 ad;
411 	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
412 	long			 tzero = time_second;
413 
414 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
415 	    PFR_FLAG_FEEDBACK);
416 	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
417 	    PFR_FLAG_USERIOCTL))
418 		return (EINVAL);
419 	kt = pfr_lookup_table(tbl);
420 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
421 		return (ESRCH);
422 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
423 		return (EPERM);
424 	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
425 	if (tmpkt == NULL)
426 		return (ENOMEM);
427 	pfr_mark_addrs(kt);
428 	SLIST_INIT(&addq);
429 	SLIST_INIT(&delq);
430 	SLIST_INIT(&changeq);
431 	for (i = 0; i < size; i++) {
432 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
433 			senderr(EFAULT);
434 		if (pfr_validate_addr(&ad))
435 			senderr(EINVAL);
436 		ad.pfra_fback = PFR_FB_NONE;
437 		p = pfr_lookup_addr(kt, &ad, 1);
438 		if (p != NULL) {
439 			if (p->pfrke_mark) {
440 				ad.pfra_fback = PFR_FB_DUPLICATE;
441 				goto _skip;
442 			}
443 			p->pfrke_mark = 1;
444 			if (p->pfrke_not != ad.pfra_not) {
445 				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
446 				ad.pfra_fback = PFR_FB_CHANGED;
447 				xchange++;
448 			}
449 		} else {
450 			q = pfr_lookup_addr(tmpkt, &ad, 1);
451 			if (q != NULL) {
452 				ad.pfra_fback = PFR_FB_DUPLICATE;
453 				goto _skip;
454 			}
455 			p = pfr_create_kentry(&ad,
456 			    !(flags & PFR_FLAG_USERIOCTL));
457 			if (p == NULL)
458 				senderr(ENOMEM);
459 			if (pfr_route_kentry(tmpkt, p)) {
460 				pfr_destroy_kentry(p);
461 				ad.pfra_fback = PFR_FB_NONE;
462 			} else {
463 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
464 				ad.pfra_fback = PFR_FB_ADDED;
465 				xadd++;
466 			}
467 		}
468 _skip:
469 		if (flags & PFR_FLAG_FEEDBACK)
470 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
471 				senderr(EFAULT);
472 	}
473 	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
474 	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
475 		if (*size2 < size+xdel) {
476 			*size2 = size+xdel;
477 			senderr(0);
478 		}
479 		i = 0;
480 		SLIST_FOREACH(p, &delq, pfrke_workq) {
481 			pfr_copyout_addr(&ad, p);
482 			ad.pfra_fback = PFR_FB_DELETED;
483 			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
484 				senderr(EFAULT);
485 			i++;
486 		}
487 	}
488 	pfr_clean_node_mask(tmpkt, &addq);
489 	if (!(flags & PFR_FLAG_DUMMY)) {
490 		if (flags & PFR_FLAG_ATOMIC)
491 			crit_enter();
492 		pfr_insert_kentries(kt, &addq, tzero);
493 		pfr_remove_kentries(kt, &delq);
494 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
495 		if (flags & PFR_FLAG_ATOMIC)
496 			crit_exit();
497 	} else
498 		pfr_destroy_kentries(&addq);
499 	if (nadd != NULL)
500 		*nadd = xadd;
501 	if (ndel != NULL)
502 		*ndel = xdel;
503 	if (nchange != NULL)
504 		*nchange = xchange;
505 	if ((flags & PFR_FLAG_FEEDBACK) && size2)
506 		*size2 = size+xdel;
507 	pfr_destroy_ktable(tmpkt, 0);
508 	return (0);
509 _bad:
510 	pfr_clean_node_mask(tmpkt, &addq);
511 	pfr_destroy_kentries(&addq);
512 	if (flags & PFR_FLAG_FEEDBACK)
513 		pfr_reset_feedback(addr, size, flags);
514 	pfr_destroy_ktable(tmpkt, 0);
515 	return (rv);
516 }
517 
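/*
 * Test which of the given host addresses match the table and report the
 * result per address; network prefixes are rejected here.
 */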
518 int
519 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
520 	int *nmatch, int flags)
521 {
522 	struct pfr_ktable	*kt;
523 	struct pfr_kentry	*p;
524 	struct pfr_addr		 ad;
525 	int			 i, xmatch = 0;
526 
527 	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
528 	if (pfr_validate_table(tbl, 0, 0))
529 		return (EINVAL);
530 	kt = pfr_lookup_table(tbl);
531 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
532 		return (ESRCH);
533 
534 	for (i = 0; i < size; i++) {
535 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
536 			return (EFAULT);
537 		if (pfr_validate_addr(&ad))
538 			return (EINVAL);
539 		if (ADDR_NETWORK(&ad))
540 			return (EINVAL);
541 		p = pfr_lookup_addr(kt, &ad, 0);
542 		if (flags & PFR_FLAG_REPLACE)
543 			pfr_copyout_addr(&ad, p);
544 		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
545 		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
546 		if (p != NULL && !p->pfrke_not)
547 			xmatch++;
548 		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
549 			return (EFAULT);
550 	}
551 	if (nmatch != NULL)
552 		*nmatch = xmatch;
553 	return (0);
554 }
555 
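/*
 * Copy the table's addresses out to the caller.  If the supplied buffer is
 * too small, only the required size is returned in *size.
 */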
556 int
557 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
558 	int flags)
559 {
560 	struct pfr_ktable	*kt;
561 	struct pfr_walktree	 w;
562 	int			 rv;
563 
564 	ACCEPT_FLAGS(flags, 0);
565 	if (pfr_validate_table(tbl, 0, 0))
566 		return (EINVAL);
567 	kt = pfr_lookup_table(tbl);
568 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
569 		return (ESRCH);
570 	if (kt->pfrkt_cnt > *size) {
571 		*size = kt->pfrkt_cnt;
572 		return (0);
573 	}
574 
575 	bzero(&w, sizeof(w));
576 	w.pfrw_op = PFRW_GET_ADDRS;
577 	w.pfrw_addr = addr;
578 	w.pfrw_free = kt->pfrkt_cnt;
579 	w.pfrw_flags = flags;
580 	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
581 	if (!rv)
582 		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
583 	if (rv)
584 		return (rv);
585 
586 	if (w.pfrw_free) {
587 		kprintf("pfr_get_addrs: corruption detected (%d).\n",
588 		    w.pfrw_free);
589 		return (ENOTTY);
590 	}
591 	*size = kt->pfrkt_cnt;
592 	return (0);
593 }
594 
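/*
 * Like pfr_get_addrs(), but also copies out the per-address packet/byte
 * counters and creation time.
 */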
595 int
596 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
597 	int flags)
598 {
599 	struct pfr_ktable	*kt;
600 	struct pfr_walktree	 w;
601 	struct pfr_kentryworkq	 workq;
602 	int			 rv;
603 	long			 tzero = time_second;
604 
605 	/* XXX PFR_FLAG_CLSTATS disabled */
606 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
607 	if (pfr_validate_table(tbl, 0, 0))
608 		return (EINVAL);
609 	kt = pfr_lookup_table(tbl);
610 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
611 		return (ESRCH);
612 	if (kt->pfrkt_cnt > *size) {
613 		*size = kt->pfrkt_cnt;
614 		return (0);
615 	}
616 
617 	bzero(&w, sizeof(w));
618 	w.pfrw_op = PFRW_GET_ASTATS;
619 	w.pfrw_astats = addr;
620 	w.pfrw_free = kt->pfrkt_cnt;
621 	w.pfrw_flags = flags;
622 	if (flags & PFR_FLAG_ATOMIC)
623 		crit_enter();
624 	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
625 	if (!rv)
626 		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
627 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
628 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
629 		pfr_clstats_kentries(&workq, tzero, 0);
630 	}
631 	if (flags & PFR_FLAG_ATOMIC)
632 		crit_exit();
633 	if (rv)
634 		return (rv);
635 
636 	if (w.pfrw_free) {
637 		kprintf("pfr_get_astats: corruption detected (%d).\n",
638 		    w.pfrw_free);
639 		return (ENOTTY);
640 	}
641 	*size = kt->pfrkt_cnt;
642 	return (0);
643 }
644 
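/*
 * Zero the statistics of the listed addresses; *nzero returns the number
 * of matching entries.
 */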
645 int
646 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
647     int *nzero, int flags)
648 {
649 	struct pfr_ktable	*kt;
650 	struct pfr_kentryworkq	 workq;
651 	struct pfr_kentry	*p;
652 	struct pfr_addr		 ad;
653 	int			 i, rv, xzero = 0;
654 
655 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
656 	    PFR_FLAG_FEEDBACK);
657 	if (pfr_validate_table(tbl, 0, 0))
658 		return (EINVAL);
659 	kt = pfr_lookup_table(tbl);
660 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
661 		return (ESRCH);
662 	SLIST_INIT(&workq);
663 	for (i = 0; i < size; i++) {
664 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
665 			senderr(EFAULT);
666 		if (pfr_validate_addr(&ad))
667 			senderr(EINVAL);
668 		p = pfr_lookup_addr(kt, &ad, 1);
669 		if (flags & PFR_FLAG_FEEDBACK) {
670 			ad.pfra_fback = (p != NULL) ?
671 			    PFR_FB_CLEARED : PFR_FB_NONE;
672 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
673 				senderr(EFAULT);
674 		}
675 		if (p != NULL) {
676 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
677 			xzero++;
678 		}
679 	}
680 
681 	if (!(flags & PFR_FLAG_DUMMY)) {
682 		if (flags & PFR_FLAG_ATOMIC)
683 			crit_enter();
684 		pfr_clstats_kentries(&workq, 0, 0);
685 		if (flags & PFR_FLAG_ATOMIC)
686 			crit_exit();
687 	}
688 	if (nzero != NULL)
689 		*nzero = xzero;
690 	return (0);
691 _bad:
692 	if (flags & PFR_FLAG_FEEDBACK)
693 		pfr_reset_feedback(addr, size, flags);
694 	return (rv);
695 }
696 
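/*
 * Sanity-check a pfr_addr supplied by the caller: known address family,
 * prefix length within range, no stray bits beyond the prefix and no
 * unexpected negation or feedback values.
 */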
697 int
698 pfr_validate_addr(struct pfr_addr *ad)
699 {
700 	int i;
701 
702 	switch (ad->pfra_af) {
703 #ifdef INET
704 	case AF_INET:
705 		if (ad->pfra_net > 32)
706 			return (-1);
707 		break;
708 #endif /* INET */
709 #ifdef INET6
710 	case AF_INET6:
711 		if (ad->pfra_net > 128)
712 			return (-1);
713 		break;
714 #endif /* INET6 */
715 	default:
716 		return (-1);
717 	}
718 	if (ad->pfra_net < 128 &&
719 		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
720 			return (-1);
721 	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
722 		if (((caddr_t)ad)[i])
723 			return (-1);
724 	if (ad->pfra_not && ad->pfra_not != 1)
725 		return (-1);
726 	if (ad->pfra_fback)
727 		return (-1);
728 	return (0);
729 }
730 
731 void
732 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
733 	int *naddr, int sweep)
734 {
735 	struct pfr_walktree	w;
736 
737 	SLIST_INIT(workq);
738 	bzero(&w, sizeof(w));
739 	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
740 	w.pfrw_workq = workq;
741 	if (kt->pfrkt_ip4 != NULL)
742 		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
743 			kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
744 	if (kt->pfrkt_ip6 != NULL)
745 		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
746 			kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
747 	if (naddr != NULL)
748 		*naddr = w.pfrw_cnt;
749 }
750 
751 void
752 pfr_mark_addrs(struct pfr_ktable *kt)
753 {
754 	struct pfr_walktree	w;
755 
756 	bzero(&w, sizeof(w));
757 	w.pfrw_op = PFRW_MARK;
758 	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
759 		kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
760 	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
761 		kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
762 }
763 
764 
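/*
 * Look up an address (or, for prefixes, the exact network) in the table's
 * radix tree.  With 'exact' set, host lookups refuse to match network
 * entries.
 */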
765 struct pfr_kentry *
766 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
767 {
768 	union sockaddr_union	 sa, mask;
769 	struct radix_node_head	*head = NULL;
770 	struct pfr_kentry	*ke;
771 
772 	bzero(&sa, sizeof(sa));
773 	if (ad->pfra_af == AF_INET) {
774 		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
775 		head = kt->pfrkt_ip4;
776 	} else if (ad->pfra_af == AF_INET6) {
777 		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
778 		head = kt->pfrkt_ip6;
779 	}
780 	if (ADDR_NETWORK(ad)) {
781 		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
782 		ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
783 		    head);
784 		if (ke && KENTRY_RNF_ROOT(ke))
785 			ke = NULL;
786 	} else {
787 		ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
788 		if (ke && KENTRY_RNF_ROOT(ke))
789 			ke = NULL;
790 		if (exact && ke && KENTRY_NETWORK(ke))
791 			ke = NULL;
792 	}
793 	return (ke);
794 }
795 
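/*
 * Allocate and initialize a table entry; 'intr' selects which malloc pool
 * the entry comes from and is remembered for pfr_destroy_kentry().
 */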
796 struct pfr_kentry *
797 pfr_create_kentry(struct pfr_addr *ad, int intr)
798 {
799 	struct pfr_kentry	*ke;
800 
801 	if (intr)
802 		ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL2, M_NOWAIT|M_ZERO);
803 	else
804 		ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL, M_NOWAIT|M_ZERO|M_NULLOK);
805 	if (ke == NULL)
806 		return (NULL);
807 
808 	if (ad->pfra_af == AF_INET)
809 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
810 	else if (ad->pfra_af == AF_INET6)
811 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
812 	ke->pfrke_af = ad->pfra_af;
813 	ke->pfrke_net = ad->pfra_net;
814 	ke->pfrke_not = ad->pfra_not;
815 	ke->pfrke_intrpool = intr;
816 	return (ke);
817 }
818 
819 void
820 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
821 {
822 	struct pfr_kentry	*p, *q;
823 
824 	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
825 		q = SLIST_NEXT(p, pfrke_workq);
826 		pfr_destroy_kentry(p);
827 	}
828 }
829 
830 void
831 pfr_destroy_kentry(struct pfr_kentry *ke)
832 {
833 	if (ke->pfrke_counters)
834 		kfree(ke->pfrke_counters, M_PFRKCOUNTERSPL);
835 	if (ke->pfrke_intrpool)
836 		kfree(ke, M_PFRKENTRYPL2);
837 	else
838 		kfree(ke, M_PFRKENTRYPL);
839 }
840 
841 void
842 pfr_insert_kentries(struct pfr_ktable *kt,
843     struct pfr_kentryworkq *workq, long tzero)
844 {
845 	struct pfr_kentry	*p;
846 	int			 rv, n = 0;
847 
848 	SLIST_FOREACH(p, workq, pfrke_workq) {
849 		rv = pfr_route_kentry(kt, p);
850 		if (rv) {
851 			kprintf("pfr_insert_kentries: cannot route entry "
852 			    "(code=%d).\n", rv);
853 			break;
854 		}
855 		p->pfrke_tzero = tzero;
856 		n++;
857 	}
858 	kt->pfrkt_cnt += n;
859 }
860 
861 int
862 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
863 {
864 	struct pfr_kentry	*p;
865 	int			 rv;
866 
867 	p = pfr_lookup_addr(kt, ad, 1);
868 	if (p != NULL)
869 		return (0);
870 	p = pfr_create_kentry(ad, 1);
871 	if (p == NULL)
872 		return (EINVAL);
873 
874 	rv = pfr_route_kentry(kt, p);
875 	if (rv)
876 		return (rv);
877 
878 	p->pfrke_tzero = tzero;
879 	kt->pfrkt_cnt++;
880 
881 	return (0);
882 }
883 
884 void
885 pfr_remove_kentries(struct pfr_ktable *kt,
886     struct pfr_kentryworkq *workq)
887 {
888 	struct pfr_kentry	*p;
889 	int			 n = 0;
890 
891 	SLIST_FOREACH(p, workq, pfrke_workq) {
892 		pfr_unroute_kentry(kt, p);
893 		n++;
894 	}
895 	kt->pfrkt_cnt -= n;
896 	pfr_destroy_kentries(workq);
897 }
898 
899 void
900 pfr_clean_node_mask(struct pfr_ktable *kt,
901     struct pfr_kentryworkq *workq)
902 {
903 	struct pfr_kentry	*p;
904 
905 	SLIST_FOREACH(p, workq, pfrke_workq)
906 		pfr_unroute_kentry(kt, p);
907 }
908 
909 void
910 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
911 {
912 	struct pfr_kentry	*p;
913 
914 	SLIST_FOREACH(p, workq, pfrke_workq) {
915 		crit_enter();
916 		if (negchange)
917 			p->pfrke_not = !p->pfrke_not;
918 		if (p->pfrke_counters) {
919 			kfree(p->pfrke_counters, M_PFRKCOUNTERSPL);
920 			p->pfrke_counters = NULL;
921 		}
922 		crit_exit();
923 		p->pfrke_tzero = tzero;
924 	}
925 }
926 
927 void
928 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
929 {
930 	struct pfr_addr	ad;
931 	int		i;
932 
933 	for (i = 0; i < size; i++) {
934 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
935 			break;
936 		ad.pfra_fback = PFR_FB_NONE;
937 		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
938 			break;
939 	}
940 }
941 
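/*
 * Build the netmask sockaddr corresponding to an address family and prefix
 * length, for use as the radix-tree mask.
 */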
942 void
943 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
944 {
945 	int	i;
946 
947 	bzero(sa, sizeof(*sa));
948 	if (af == AF_INET) {
949 		sa->sin.sin_len = sizeof(sa->sin);
950 		sa->sin.sin_family = AF_INET;
951 		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
952 	} else if (af == AF_INET6) {
953 		sa->sin6.sin6_len = sizeof(sa->sin6);
954 		sa->sin6.sin6_family = AF_INET6;
955 		for (i = 0; i < 4; i++) {
956 			if (net <= 32) {
957 				sa->sin6.sin6_addr.s6_addr32[i] =
958 				    net ? htonl(-1 << (32-net)) : 0;
959 				break;
960 			}
961 			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
962 			net -= 32;
963 		}
964 	}
965 }
966 
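/*
 * Insert an entry into the table's IPv4 or IPv6 radix tree; returns -1 if
 * the radix code refuses the route (e.g. a duplicate key).
 */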
967 int
968 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
969 {
970 	union sockaddr_union	 mask;
971 	struct radix_node	*rn;
972 	struct radix_node_head	*head = NULL;
973 
974 	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
975 	if (ke->pfrke_af == AF_INET)
976 		head = kt->pfrkt_ip4;
977 	else if (ke->pfrke_af == AF_INET6)
978 		head = kt->pfrkt_ip6;
979 
980 	if (KENTRY_NETWORK(ke)) {
981 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
982 		rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
983 		    ke->pfrke_node);
984 	} else
985 		rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
986 		    ke->pfrke_node);
987 
988 	return (rn == NULL ? -1 : 0);
989 }
990 
991 int
992 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
993 {
994 	union sockaddr_union	 mask;
995 	struct radix_node	*rn;
996 	struct radix_node_head	*head = NULL;
997 
998 	if (ke->pfrke_af == AF_INET)
999 		head = kt->pfrkt_ip4;
1000 	else if (ke->pfrke_af == AF_INET6)
1001 		head = kt->pfrkt_ip6;
1002 
1003 	if (KENTRY_NETWORK(ke)) {
1004 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1005 		rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
1006 	} else
1007 		rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
1008 
1009 	if (rn == NULL) {
1010 		kprintf("pfr_unroute_kentry: delete failed.\n");
1011 		return (-1);
1012 	}
1013 	return (0);
1014 }
1015 
1016 void
1017 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1018 {
1019 	bzero(ad, sizeof(*ad));
1020 	if (ke == NULL)
1021 		return;
1022 	ad->pfra_af = ke->pfrke_af;
1023 	ad->pfra_net = ke->pfrke_net;
1024 	ad->pfra_not = ke->pfrke_not;
1025 	if (ad->pfra_af == AF_INET)
1026 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1027 	else if (ad->pfra_af == AF_INET6)
1028 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1029 }
1030 
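/*
 * Radix-tree walker callback; dispatches on the operation stored in the
 * pfr_walktree state (mark, sweep, enqueue, copy out, pool lookup or
 * dynaddr update).
 */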
1031 int
1032 pfr_walktree(struct radix_node *rn, void *arg)
1033 {
1034 	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
1035 	struct pfr_walktree	*w = arg;
1036 	union sockaddr_union	pfr_mask;
1037 	int			flags = w->pfrw_flags;
1038 
1039 	switch (w->pfrw_op) {
1040 	case PFRW_MARK:
1041 		ke->pfrke_mark = 0;
1042 		break;
1043 	case PFRW_SWEEP:
1044 		if (ke->pfrke_mark)
1045 			break;
1046 		/* FALLTHROUGH */
1047 	case PFRW_ENQUEUE:
1048 		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1049 		w->pfrw_cnt++;
1050 		break;
1051 	case PFRW_GET_ADDRS:
1052 		if (w->pfrw_free-- > 0) {
1053 			struct pfr_addr ad;
1054 
1055 			pfr_copyout_addr(&ad, ke);
1056 			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
1057 				return (EFAULT);
1058 			w->pfrw_addr++;
1059 		}
1060 		break;
1061 	case PFRW_GET_ASTATS:
1062 		if (w->pfrw_free-- > 0) {
1063 			struct pfr_astats as;
1064 
1065 			pfr_copyout_addr(&as.pfras_a, ke);
1066 
1067 			crit_enter();
1068 			if (ke->pfrke_counters) {
1069 				bcopy(ke->pfrke_counters->pfrkc_packets,
1070 				    as.pfras_packets, sizeof(as.pfras_packets));
1071 				bcopy(ke->pfrke_counters->pfrkc_bytes,
1072 				    as.pfras_bytes, sizeof(as.pfras_bytes));
1073 			} else {
1074 				bzero(as.pfras_packets, sizeof(as.pfras_packets));
1075 				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1076 				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1077 			}
1078 			crit_exit();
1079 			as.pfras_tzero = ke->pfrke_tzero;
1080 
1081 			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
1082 				return (EFAULT);
1083 			w->pfrw_astats++;
1084 		}
1085 		break;
1086 	case PFRW_POOL_GET:
1087 		if (ke->pfrke_not)
1088 			break; /* negative entries are ignored */
1089 		if (!w->pfrw_cnt--) {
1090 			w->pfrw_kentry = ke;
1091 			return (1); /* finish search */
1092 		}
1093 		break;
1094 	case PFRW_DYNADDR_UPDATE:
1095 		if (ke->pfrke_af == AF_INET) {
1096 			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1097 				break;
1098 			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1099 			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1100 			    &ke->pfrke_sa, AF_INET);
1101 			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1102 			    &pfr_mask, AF_INET);
1103 		} else if (ke->pfrke_af == AF_INET6){
1104 			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1105 				break;
1106 			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1107 			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1108 			    &ke->pfrke_sa, AF_INET6);
1109 			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1110 			    &pfr_mask, AF_INET6);
1111 		}
1112 		break;
1113 	}
1114 	return (0);
1115 }
1116 
1117 int
1118 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1119 {
1120 	struct pfr_ktableworkq	 workq;
1121 	struct pfr_ktable	*p;
1122 	int			 xdel = 0;
1123 
1124 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1125 	    PFR_FLAG_ALLRSETS);
1126 	if (pfr_fix_anchor(filter->pfrt_anchor))
1127 		return (EINVAL);
1128 	if (pfr_table_count(filter, flags) < 0)
1129 		return (ENOENT);
1130 
1131 	SLIST_INIT(&workq);
1132 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1133 		if (pfr_skip_table(filter, p, flags))
1134 			continue;
1135 		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1136 			continue;
1137 		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1138 			continue;
1139 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1140 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1141 		xdel++;
1142 	}
1143 	if (!(flags & PFR_FLAG_DUMMY)) {
1144 		if (flags & PFR_FLAG_ATOMIC)
1145 			crit_enter();
1146 		pfr_setflags_ktables(&workq);
1147 		if (flags & PFR_FLAG_ATOMIC)
1148 			crit_exit();
1149 	}
1150 	if (ndel != NULL)
1151 		*ndel = xdel;
1152 	return (0);
1153 }
1154 
1155 int
1156 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1157 {
1158 	struct pfr_ktableworkq	 addq, changeq;
1159 	struct pfr_ktable	*p, *q, *r, key;
1160 	int			 i, rv, xadd = 0;
1161 	long			 tzero = time_second;
1162 
1163 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1164 	SLIST_INIT(&addq);
1165 	SLIST_INIT(&changeq);
1166 	for (i = 0; i < size; i++) {
1167 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1168 			senderr(EFAULT);
1169 		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1170 		    flags & PFR_FLAG_USERIOCTL))
1171 			senderr(EINVAL);
1172 		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1173 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1174 		if (p == NULL) {
1175 			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1176 			if (p == NULL)
1177 				senderr(ENOMEM);
1178 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1179 				if (!pfr_ktable_compare(p, q))
1180 					goto _skip;
1181 			}
1182 			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1183 			xadd++;
1184 			if (!key.pfrkt_anchor[0])
1185 				goto _skip;
1186 
1187 			/* find or create root table */
1188 			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1189 			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1190 			if (r != NULL) {
1191 				p->pfrkt_root = r;
1192 				goto _skip;
1193 			}
1194 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1195 				if (!pfr_ktable_compare(&key, q)) {
1196 					p->pfrkt_root = q;
1197 					goto _skip;
1198 				}
1199 			}
1200 			key.pfrkt_flags = 0;
1201 			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1202 			if (r == NULL)
1203 				senderr(ENOMEM);
1204 			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1205 			p->pfrkt_root = r;
1206 		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1207 			SLIST_FOREACH(q, &changeq, pfrkt_workq)
1208 				if (!pfr_ktable_compare(&key, q))
1209 					goto _skip;
1210 			p->pfrkt_nflags = (p->pfrkt_flags &
1211 			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1212 			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1213 			xadd++;
1214 		}
1215 _skip:
1216 	;
1217 	}
1218 	if (!(flags & PFR_FLAG_DUMMY)) {
1219 		if (flags & PFR_FLAG_ATOMIC)
1220 			crit_enter();
1221 		pfr_insert_ktables(&addq);
1222 		pfr_setflags_ktables(&changeq);
1223 		if (flags & PFR_FLAG_ATOMIC)
1224 			crit_exit();
1225 	} else
1226 		 pfr_destroy_ktables(&addq, 0);
1227 	if (nadd != NULL)
1228 		*nadd = xadd;
1229 	return (0);
1230 _bad:
1231 	pfr_destroy_ktables(&addq, 0);
1232 	return (rv);
1233 }
1234 
1235 int
1236 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1237 {
1238 	struct pfr_ktableworkq	 workq;
1239 	struct pfr_ktable	*p, *q, key;
1240 	int			 i, xdel = 0;
1241 
1242 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1243 	SLIST_INIT(&workq);
1244 	for (i = 0; i < size; i++) {
1245 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1246 			return (EFAULT);
1247 		if (pfr_validate_table(&key.pfrkt_t, 0,
1248 		    flags & PFR_FLAG_USERIOCTL))
1249 			return (EINVAL);
1250 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1251 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1252 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1253 				if (!pfr_ktable_compare(p, q))
1254 					goto _skip;
1255 			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1256 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1257 			xdel++;
1258 		}
1259 _skip:
1260 	;
1261 	}
1262 
1263 	if (!(flags & PFR_FLAG_DUMMY)) {
1264 		if (flags & PFR_FLAG_ATOMIC)
1265 			crit_enter();
1266 		pfr_setflags_ktables(&workq);
1267 		if (flags & PFR_FLAG_ATOMIC)
1268 			crit_exit();
1269 	}
1270 	if (ndel != NULL)
1271 		*ndel = xdel;
1272 	return (0);
1273 }
1274 
1275 int
1276 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1277 	int flags)
1278 {
1279 	struct pfr_ktable	*p;
1280 	int			 n, nn;
1281 
1282 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1283 	if (pfr_fix_anchor(filter->pfrt_anchor))
1284 		return (EINVAL);
1285 	n = nn = pfr_table_count(filter, flags);
1286 	if (n < 0)
1287 		return (ENOENT);
1288 	if (n > *size) {
1289 		*size = n;
1290 		return (0);
1291 	}
1292 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1293 		if (pfr_skip_table(filter, p, flags))
1294 			continue;
1295 		if (n-- <= 0)
1296 			continue;
1297 		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
1298 			return (EFAULT);
1299 	}
1300 	if (n) {
1301 		kprintf("pfr_get_tables: corruption detected (%d).\n", n);
1302 		return (ENOTTY);
1303 	}
1304 	*size = nn;
1305 	return (0);
1306 }
1307 
1308 int
1309 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1310 	int flags)
1311 {
1312 	struct pfr_ktable	*p;
1313 	struct pfr_ktableworkq	 workq;
1314 	int			 n, nn;
1315 	long			 tzero = time_second;
1316 
1317 	/* XXX PFR_FLAG_CLSTATS disabled */
1318 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
1319 	if (pfr_fix_anchor(filter->pfrt_anchor))
1320 		return (EINVAL);
1321 	n = nn = pfr_table_count(filter, flags);
1322 	if (n < 0)
1323 		return (ENOENT);
1324 	if (n > *size) {
1325 		*size = n;
1326 		return (0);
1327 	}
1328 	SLIST_INIT(&workq);
1329 	if (flags & PFR_FLAG_ATOMIC)
1330 		crit_enter();
1331 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1332 		if (pfr_skip_table(filter, p, flags))
1333 			continue;
1334 		if (n-- <= 0)
1335 			continue;
1336 		if (!(flags & PFR_FLAG_ATOMIC))
1337 			crit_enter();
1338 		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
1339 			crit_exit();
1340 			return (EFAULT);
1341 		}
1342 		if (!(flags & PFR_FLAG_ATOMIC))
1343 			crit_exit();
1344 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1345 	}
1346 	if (flags & PFR_FLAG_CLSTATS)
1347 		pfr_clstats_ktables(&workq, tzero,
1348 		    flags & PFR_FLAG_ADDRSTOO);
1349 	if (flags & PFR_FLAG_ATOMIC)
1350 		crit_exit();
1351 	if (n) {
1352 		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
1353 		return (ENOTTY);
1354 	}
1355 	*size = nn;
1356 	return (0);
1357 }
1358 
1359 int
1360 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1361 {
1362 	struct pfr_ktableworkq	 workq;
1363 	struct pfr_ktable	*p, key;
1364 	int			 i, xzero = 0;
1365 	long			 tzero = time_second;
1366 
1367 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1368 	    PFR_FLAG_ADDRSTOO);
1369 	SLIST_INIT(&workq);
1370 	for (i = 0; i < size; i++) {
1371 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1372 			return (EFAULT);
1373 		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1374 			return (EINVAL);
1375 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1376 		if (p != NULL) {
1377 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1378 			xzero++;
1379 		}
1380 	}
1381 	if (!(flags & PFR_FLAG_DUMMY)) {
1382 		if (flags & PFR_FLAG_ATOMIC)
1383 			crit_enter();
1384 		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1385 		if (flags & PFR_FLAG_ATOMIC)
1386 			crit_exit();
1387 	}
1388 	if (nzero != NULL)
1389 		*nzero = xzero;
1390 	return (0);
1391 }
1392 
1393 int
1394 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1395 	int *nchange, int *ndel, int flags)
1396 {
1397 	struct pfr_ktableworkq	 workq;
1398 	struct pfr_ktable	*p, *q, key;
1399 	int			 i, xchange = 0, xdel = 0;
1400 
1401 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1402 	if ((setflag & ~PFR_TFLAG_USRMASK) ||
1403 	    (clrflag & ~PFR_TFLAG_USRMASK) ||
1404 	    (setflag & clrflag))
1405 		return (EINVAL);
1406 	SLIST_INIT(&workq);
1407 	for (i = 0; i < size; i++) {
1408 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1409 			return (EFAULT);
1410 		if (pfr_validate_table(&key.pfrkt_t, 0,
1411 		    flags & PFR_FLAG_USERIOCTL))
1412 			return (EINVAL);
1413 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1414 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1415 			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1416 			    ~clrflag;
1417 			if (p->pfrkt_nflags == p->pfrkt_flags)
1418 				goto _skip;
1419 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1420 				if (!pfr_ktable_compare(p, q))
1421 					goto _skip;
1422 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1423 			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1424 			    (clrflag & PFR_TFLAG_PERSIST) &&
1425 			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1426 				xdel++;
1427 			else
1428 				xchange++;
1429 		}
1430 _skip:
1431 	;
1432 	}
1433 	if (!(flags & PFR_FLAG_DUMMY)) {
1434 		if (flags & PFR_FLAG_ATOMIC)
1435 			crit_enter();
1436 		pfr_setflags_ktables(&workq);
1437 		if (flags & PFR_FLAG_ATOMIC)
1438 			crit_exit();
1439 	}
1440 	if (nchange != NULL)
1441 		*nchange = xchange;
1442 	if (ndel != NULL)
1443 		*ndel = xdel;
1444 	return (0);
1445 }
1446 
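/*
 * Begin a table transaction for an anchor: clear any stale inactive tables
 * and hand back a ticket that the subsequent define/commit calls must quote.
 */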
1447 int
1448 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1449 {
1450 	struct pfr_ktableworkq	 workq;
1451 	struct pfr_ktable	*p;
1452 	struct pf_ruleset	*rs;
1453 	int			 xdel = 0;
1454 
1455 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1456 	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1457 	if (rs == NULL)
1458 		return (ENOMEM);
1459 	SLIST_INIT(&workq);
1460 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1461 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1462 		    pfr_skip_table(trs, p, 0))
1463 			continue;
1464 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1465 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1466 		xdel++;
1467 	}
1468 	if (!(flags & PFR_FLAG_DUMMY)) {
1469 		pfr_setflags_ktables(&workq);
1470 		if (ticket != NULL)
1471 			*ticket = ++rs->tticket;
1472 		rs->topen = 1;
1473 	} else
1474 		pf_remove_if_empty_ruleset(rs);
1475 	if (ndel != NULL)
1476 		*ndel = xdel;
1477 	return (0);
1478 }
1479 
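/*
 * Load a table (and optionally its addresses) into the inactive set as a
 * shadow copy, to be swapped in by pfr_ina_commit().
 */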
1480 int
1481 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1482     int *nadd, int *naddr, u_int32_t ticket, int flags)
1483 {
1484 	struct pfr_ktableworkq	 tableq;
1485 	struct pfr_kentryworkq	 addrq;
1486 	struct pfr_ktable	*kt, *rt, *shadow, key;
1487 	struct pfr_kentry	*p;
1488 	struct pfr_addr		 ad;
1489 	struct pf_ruleset	*rs;
1490 	int			 i, rv, xadd = 0, xaddr = 0;
1491 
1492 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1493 	if (size && !(flags & PFR_FLAG_ADDRSTOO))
1494 		return (EINVAL);
1495 	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1496 	    flags & PFR_FLAG_USERIOCTL))
1497 		return (EINVAL);
1498 	rs = pf_find_ruleset(tbl->pfrt_anchor);
1499 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1500 		return (EBUSY);
1501 	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1502 	SLIST_INIT(&tableq);
1503 	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1504 	if (kt == NULL) {
1505 		kt = pfr_create_ktable(tbl, 0, 1);
1506 		if (kt == NULL)
1507 			return (ENOMEM);
1508 		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1509 		xadd++;
1510 		if (!tbl->pfrt_anchor[0])
1511 			goto _skip;
1512 
1513 		/* find or create root table */
1514 		bzero(&key, sizeof(key));
1515 		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1516 		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1517 		if (rt != NULL) {
1518 			kt->pfrkt_root = rt;
1519 			goto _skip;
1520 		}
1521 		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1522 		if (rt == NULL) {
1523 			pfr_destroy_ktables(&tableq, 0);
1524 			return (ENOMEM);
1525 		}
1526 		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1527 		kt->pfrkt_root = rt;
1528 	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1529 		xadd++;
1530 _skip:
1531 	shadow = pfr_create_ktable(tbl, 0, 0);
1532 	if (shadow == NULL) {
1533 		pfr_destroy_ktables(&tableq, 0);
1534 		return (ENOMEM);
1535 	}
1536 	SLIST_INIT(&addrq);
1537 	for (i = 0; i < size; i++) {
1538 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
1539 			senderr(EFAULT);
1540 		if (pfr_validate_addr(&ad))
1541 			senderr(EINVAL);
1542 		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1543 			continue;
1544 		p = pfr_create_kentry(&ad, 0);
1545 		if (p == NULL)
1546 			senderr(ENOMEM);
1547 		if (pfr_route_kentry(shadow, p)) {
1548 			pfr_destroy_kentry(p);
1549 			continue;
1550 		}
1551 		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1552 		xaddr++;
1553 	}
1554 	if (!(flags & PFR_FLAG_DUMMY)) {
1555 		if (kt->pfrkt_shadow != NULL)
1556 			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1557 		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1558 		pfr_insert_ktables(&tableq);
1559 		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1560 		    xaddr : NO_ADDRESSES;
1561 		kt->pfrkt_shadow = shadow;
1562 	} else {
1563 		pfr_clean_node_mask(shadow, &addrq);
1564 		pfr_destroy_ktable(shadow, 0);
1565 		pfr_destroy_ktables(&tableq, 0);
1566 		pfr_destroy_kentries(&addrq);
1567 	}
1568 	if (nadd != NULL)
1569 		*nadd = xadd;
1570 	if (naddr != NULL)
1571 		*naddr = xaddr;
1572 	return (0);
1573 _bad:
1574 	pfr_destroy_ktable(shadow, 0);
1575 	pfr_destroy_ktables(&tableq, 0);
1576 	pfr_destroy_kentries(&addrq);
1577 	return (rv);
1578 }
1579 
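/*
 * Abort a table transaction: drop the inactive flag from the anchor's
 * tables, discarding their shadow copies.
 */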
1580 int
1581 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1582 {
1583 	struct pfr_ktableworkq	 workq;
1584 	struct pfr_ktable	*p;
1585 	struct pf_ruleset	*rs;
1586 	int			 xdel = 0;
1587 
1588 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1589 	rs = pf_find_ruleset(trs->pfrt_anchor);
1590 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1591 		return (0);
1592 	SLIST_INIT(&workq);
1593 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1594 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1595 		    pfr_skip_table(trs, p, 0))
1596 			continue;
1597 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1598 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1599 		xdel++;
1600 	}
1601 	if (!(flags & PFR_FLAG_DUMMY)) {
1602 		pfr_setflags_ktables(&workq);
1603 		rs->topen = 0;
1604 		pf_remove_if_empty_ruleset(rs);
1605 	}
1606 	if (ndel != NULL)
1607 		*ndel = xdel;
1608 	return (0);
1609 }
1610 
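/*
 * Commit a table transaction: every inactive table in the anchor is made
 * active via pfr_commit_ktable(), then the ruleset's ticket is closed.
 */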
1611 int
1612 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1613     int *nchange, int flags)
1614 {
1615 	struct pfr_ktable	*p, *q;
1616 	struct pfr_ktableworkq	 workq;
1617 	struct pf_ruleset	*rs;
1618 	int			 xadd = 0, xchange = 0;
1619 	long			 tzero = time_second;
1620 
1621 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1622 	rs = pf_find_ruleset(trs->pfrt_anchor);
1623 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1624 		return (EBUSY);
1625 
1626 	SLIST_INIT(&workq);
1627 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1628 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1629 		    pfr_skip_table(trs, p, 0))
1630 			continue;
1631 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1632 		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1633 			xchange++;
1634 		else
1635 			xadd++;
1636 	}
1637 
1638 	if (!(flags & PFR_FLAG_DUMMY)) {
1639 		if (flags & PFR_FLAG_ATOMIC)
1640 			crit_enter();
1641 		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1642 			q = SLIST_NEXT(p, pfrkt_workq);
1643 			pfr_commit_ktable(p, tzero);
1644 		}
1645 		if (flags & PFR_FLAG_ATOMIC)
1646 			crit_exit();
1647 		rs->topen = 0;
1648 		pf_remove_if_empty_ruleset(rs);
1649 	}
1650 	if (nadd != NULL)
1651 		*nadd = xadd;
1652 	if (nchange != NULL)
1653 		*nchange = xchange;
1654 
1655 	return (0);
1656 }
1657 
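/*
 * Replace a table's contents with its inactive shadow copy, either by
 * merging entries (when the table is active and may be referenced) or by
 * simply swapping the radix trees.
 */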
1658 void
1659 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1660 {
1661 	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1662 	int			 nflags;
1663 
1664 	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1665 		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1666 			pfr_clstats_ktable(kt, tzero, 1);
1667 	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1668 		/* kt might contain addresses */
1669 		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1670 		struct pfr_kentry	*p, *q, *next;
1671 		struct pfr_addr		 ad;
1672 
1673 		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1674 		pfr_mark_addrs(kt);
1675 		SLIST_INIT(&addq);
1676 		SLIST_INIT(&changeq);
1677 		SLIST_INIT(&delq);
1678 		SLIST_INIT(&garbageq);
1679 		pfr_clean_node_mask(shadow, &addrq);
1680 		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1681 			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
1682 			pfr_copyout_addr(&ad, p);
1683 			q = pfr_lookup_addr(kt, &ad, 1);
1684 			if (q != NULL) {
1685 				if (q->pfrke_not != p->pfrke_not)
1686 					SLIST_INSERT_HEAD(&changeq, q,
1687 					    pfrke_workq);
1688 				q->pfrke_mark = 1;
1689 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1690 			} else {
1691 				p->pfrke_tzero = tzero;
1692 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1693 			}
1694 		}
1695 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1696 		pfr_insert_kentries(kt, &addq, tzero);
1697 		pfr_remove_kentries(kt, &delq);
1698 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1699 		pfr_destroy_kentries(&garbageq);
1700 	} else {
1701 		/* kt cannot contain addresses */
1702 		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1703 		    shadow->pfrkt_ip4);
1704 		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1705 		    shadow->pfrkt_ip6);
1706 		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1707 		pfr_clstats_ktable(kt, tzero, 1);
1708 	}
1709 	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1710 	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1711 		& ~PFR_TFLAG_INACTIVE;
1712 	pfr_destroy_ktable(shadow, 0);
1713 	kt->pfrkt_shadow = NULL;
1714 	pfr_setflags_ktable(kt, nflags);
1715 }
1716 
1717 int
1718 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1719 {
1720 	int i;
1721 
1722 	if (!tbl->pfrt_name[0])
1723 		return (-1);
1724 	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1725 		 return (-1);
1726 	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1727 		return (-1);
1728 	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1729 		if (tbl->pfrt_name[i])
1730 			return (-1);
1731 	if (pfr_fix_anchor(tbl->pfrt_anchor))
1732 		return (-1);
1733 	if (tbl->pfrt_flags & ~allowedflags)
1734 		return (-1);
1735 	return (0);
1736 }
1737 
1738 /*
1739  * Rewrite anchors referenced by tables to remove slashes
1740  * and check for validity.
1741  */
1742 int
1743 pfr_fix_anchor(char *anchor)
1744 {
1745 	size_t siz = MAXPATHLEN;
1746 	int i;
1747 
1748 	if (anchor[0] == '/') {
1749 		char *path;
1750 		int off;
1751 
1752 		path = anchor;
1753 		off = 1;
1754 		while (*++path == '/')
1755 			off++;
1756 		bcopy(path, anchor, siz - off);
1757 		memset(anchor + siz - off, 0, off);
1758 	}
1759 	if (anchor[siz - 1])
1760 		return (-1);
1761 	for (i = strlen(anchor); i < siz; i++)
1762 		if (anchor[i])
1763 			return (-1);
1764 	return (0);
1765 }
1766 
1767 int
1768 pfr_table_count(struct pfr_table *filter, int flags)
1769 {
1770 	struct pf_ruleset *rs;
1771 
1772 	if (flags & PFR_FLAG_ALLRSETS)
1773 		return (pfr_ktable_cnt);
1774 	if (filter->pfrt_anchor[0]) {
1775 		rs = pf_find_ruleset(filter->pfrt_anchor);
1776 		return ((rs != NULL) ? rs->tables : -1);
1777 	}
1778 	return (pf_main_ruleset.tables);
1779 }
1780 
1781 int
1782 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1783 {
1784 	if (flags & PFR_FLAG_ALLRSETS)
1785 		return (0);
1786 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1787 		return (1);
1788 	return (0);
1789 }
1790 
1791 void
1792 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1793 {
1794 	struct pfr_ktable	*p;
1795 
1796 	SLIST_FOREACH(p, workq, pfrkt_workq)
1797 		pfr_insert_ktable(p);
1798 }
1799 
1800 void
1801 pfr_insert_ktable(struct pfr_ktable *kt)
1802 {
1803 	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1804 	pfr_ktable_cnt++;
1805 	if (kt->pfrkt_root != NULL)
1806 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1807 			pfr_setflags_ktable(kt->pfrkt_root,
1808 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1809 }
1810 
1811 void
1812 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1813 {
1814 	struct pfr_ktable	*p, *q;
1815 
1816 	for (p = SLIST_FIRST(workq); p; p = q) {
1817 		q = SLIST_NEXT(p, pfrkt_workq);
1818 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1819 	}
1820 }
1821 
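/*
 * Apply a new flag set to a table.  A table left with neither the active
 * nor the inactive flag is removed and destroyed; clearing the active flag
 * releases its addresses and clearing the inactive flag its shadow copy.
 */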
1822 void
1823 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1824 {
1825 	struct pfr_kentryworkq	addrq;
1826 
1827 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1828 	    !(newf & PFR_TFLAG_PERSIST))
1829 		newf &= ~PFR_TFLAG_ACTIVE;
1830 	if (!(newf & PFR_TFLAG_ACTIVE))
1831 		newf &= ~PFR_TFLAG_USRMASK;
1832 	if (!(newf & PFR_TFLAG_SETMASK)) {
1833 		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1834 		if (kt->pfrkt_root != NULL)
1835 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1836 				pfr_setflags_ktable(kt->pfrkt_root,
1837 				    kt->pfrkt_root->pfrkt_flags &
1838 					~PFR_TFLAG_REFDANCHOR);
1839 		pfr_destroy_ktable(kt, 1);
1840 		pfr_ktable_cnt--;
1841 		return;
1842 	}
1843 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1844 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1845 		pfr_remove_kentries(kt, &addrq);
1846 	}
1847 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1848 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1849 		kt->pfrkt_shadow = NULL;
1850 	}
1851 	kt->pfrkt_flags = newf;
1852 }
1853 
1854 void
1855 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1856 {
1857 	struct pfr_ktable	*p;
1858 
1859 	SLIST_FOREACH(p, workq, pfrkt_workq)
1860 		pfr_clstats_ktable(p, tzero, recurse);
1861 }
1862 
1863 void
1864 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1865 {
1866 	struct pfr_kentryworkq	 addrq;
1867 
1868 	if (recurse) {
1869 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1870 		pfr_clstats_kentries(&addrq, tzero, 0);
1871 	}
1872 	crit_enter();
1873 	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1874 	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1875 	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1876 	crit_exit();
1877 	kt->pfrkt_tzero = tzero;
1878 }
1879 
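/*
 * Allocate a kernel table, optionally attach it to its ruleset and set up
 * its IPv4 and IPv6 radix trees.
 */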
1880 struct pfr_ktable *
1881 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1882 {
1883 	struct pfr_ktable	*kt;
1884 	struct pf_ruleset	*rs;
1885 
1886 	kt = kmalloc(sizeof(struct pfr_ktable), M_PFRKTABLEPL, M_NOWAIT|M_ZERO|M_NULLOK);
1887 	if (kt == NULL)
1888 		return (NULL);
1889 	kt->pfrkt_t = *tbl;
1890 
1891 	if (attachruleset) {
1892 		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1893 		if (!rs) {
1894 			pfr_destroy_ktable(kt, 0);
1895 			return (NULL);
1896 		}
1897 		kt->pfrkt_rs = rs;
1898 		rs->tables++;
1899 	}
1900 
1901 	KKASSERT(pf_maskhead != NULL);
1902 	if (!rn_inithead(&kt->pfrkt_ip4, pf_maskhead,
1903 			 offsetof(struct sockaddr_in, sin_addr)) ||
1904 	    !rn_inithead(&kt->pfrkt_ip6, pf_maskhead,
1905 			 offsetof(struct sockaddr_in6, sin6_addr))) {
1906 		pfr_destroy_ktable(kt, 0);
1907 		return (NULL);
1908 	}
1909 	kt->pfrkt_tzero = tzero;
1910 
1911 	return (kt);
1912 }
1913 
1914 void
1915 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1916 {
1917 	struct pfr_ktable	*p, *q;
1918 
1919 	for (p = SLIST_FIRST(workq); p; p = q) {
1920 		q = SLIST_NEXT(p, pfrkt_workq);
1921 		pfr_destroy_ktable(p, flushaddr);
1922 	}
1923 }
1924 
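/*
 * Tear down a ktable: optionally flush and free its entries, release
 * the radix heads, recurse into an attached shadow table and drop the
 * reference on the owning ruleset before freeing the table itself.
 */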
1925 void
1926 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1927 {
1928 	struct pfr_kentryworkq	 addrq;
1929 
1930 	if (flushaddr) {
1931 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1932 		pfr_clean_node_mask(kt, &addrq);
1933 		pfr_destroy_kentries(&addrq);
1934 	}
1935 	if (kt->pfrkt_ip4 != NULL)
1936 		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1937 
1938 	if (kt->pfrkt_ip6 != NULL)
1939 		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1940 	if (kt->pfrkt_shadow != NULL)
1941 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1942 	if (kt->pfrkt_rs != NULL) {
1943 		kt->pfrkt_rs->tables--;
1944 		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1945 	}
1946 	kfree(kt, M_PFRKTABLEPL);
1947 }
1948 
1949 int
1950 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1951 {
1952 	int d;
1953 
1954 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1955 		return (d);
1956 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1957 }
1958 
1959 struct pfr_ktable *
1960 pfr_lookup_table(struct pfr_table *tbl)
1961 {
1962 	/* a struct pfr_ktable starts like a struct pfr_table */
1963 	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1964 	    (struct pfr_ktable *)tbl));
1965 }
1966 
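/*
 * Look up an address in the table and report whether it matches.
 * An inactive table defers to its root table; negated entries count
 * as a non-match.  The table's match/nomatch statistics are updated.
 */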
1967 int
1968 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1969 {
1970 	struct pfr_kentry	*ke = NULL;
1971 	int			 match;
1972 	struct sockaddr_in	 pfr_sin;
1973 #ifdef INET6
1974 	struct sockaddr_in6	 pfr_sin6;
1975 #endif
1976 
1977 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1978 		kt = kt->pfrkt_root;
1979 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1980 		return (0);
1981 
1982 	switch (af) {
1983 #ifdef INET
1984 	case AF_INET:
1985 		bzero(&pfr_sin, sizeof(pfr_sin));
1986 		pfr_sin.sin_len = sizeof(pfr_sin);
1987 		pfr_sin.sin_family = AF_INET;
1988 		pfr_sin.sin_addr.s_addr = a->addr32[0];
1989 		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
1990 		    kt->pfrkt_ip4);
1991 		if (ke && KENTRY_RNF_ROOT(ke))
1992 			ke = NULL;
1993 		break;
1994 #endif /* INET */
1995 #ifdef INET6
1996 	case AF_INET6:
1997 		bzero(&pfr_sin6, sizeof(pfr_sin6));
1998 		pfr_sin6.sin6_len = sizeof(pfr_sin6);
1999 		pfr_sin6.sin6_family = AF_INET6;
2000 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2001 		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2002 		    kt->pfrkt_ip6);
2003 		if (ke && KENTRY_RNF_ROOT(ke))
2004 			ke = NULL;
2005 		break;
2006 #endif /* INET6 */
2007 	}
2008 	match = (ke && !ke->pfrke_not);
2009 	if (match)
2010 		kt->pfrkt_match++;
2011 	else
2012 		kt->pfrkt_nomatch++;
2013 	return (match);
2014 }
2015 
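/*
 * Account a packet against the table and, if PFR_TFLAG_COUNTERS is
 * set, against the matching entry (allocating its counters on first
 * use).  When the lookup result disagrees with what the rule expected
 * (notrule), the packet is booked under PFR_OP_XPASS instead of the
 * supplied operation.
 */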
2016 void
2017 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2018     u_int64_t len, int dir_out, int op_pass, int notrule)
2019 {
2020 	struct pfr_kentry	*ke = NULL;
2021 	struct sockaddr_in	 pfr_sin;
2022 #ifdef INET6
2023 	struct sockaddr_in6	 pfr_sin6;
2024 #endif
2025 
2026 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2027 		kt = kt->pfrkt_root;
2028 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2029 		return;
2030 
2031 	switch (af) {
2032 #ifdef INET
2033 	case AF_INET:
2034 		bzero(&pfr_sin, sizeof(pfr_sin));
2035 		pfr_sin.sin_len = sizeof(pfr_sin);
2036 		pfr_sin.sin_family = AF_INET;
2037 		pfr_sin.sin_addr.s_addr = a->addr32[0];
2038 		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2039 		    kt->pfrkt_ip4);
2040 		if (ke && KENTRY_RNF_ROOT(ke))
2041 			ke = NULL;
2042 		break;
2043 #endif /* INET */
2044 #ifdef INET6
2045 	case AF_INET6:
2046 		bzero(&pfr_sin6, sizeof(pfr_sin6));
2047 		pfr_sin6.sin6_len = sizeof(pfr_sin6);
2048 		pfr_sin6.sin6_family = AF_INET6;
2049 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2050 		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2051 		    kt->pfrkt_ip6);
2052 		if (ke && KENTRY_RNF_ROOT(ke))
2053 			ke = NULL;
2054 		break;
2055 #endif /* INET6 */
2056 	default:
2057 		;
2058 	}
2059 	if ((ke == NULL || ke->pfrke_not) != notrule) {
2060 		if (op_pass != PFR_OP_PASS)
2061 			kprintf("pfr_update_stats: assertion failed.\n");
2062 		op_pass = PFR_OP_XPASS;
2063 	}
2064 	kt->pfrkt_packets[dir_out][op_pass]++;
2065 	kt->pfrkt_bytes[dir_out][op_pass] += len;
2066 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
2067 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2068 		if (ke->pfrke_counters == NULL)
2069 			ke->pfrke_counters = kmalloc(sizeof(struct pfr_kcounters),
2070 			    M_PFRKCOUNTERSPL, M_NOWAIT|M_ZERO);
2071 		if (ke->pfrke_counters != NULL) {
2072 			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2073 			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
2074 		}
2075 	}
2076 }
2077 
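/*
 * Find or create the table referenced by a rule and take a rule
 * reference on it.  A table created inside an anchor also gets a root
 * table of the same name in the main ruleset.  The first rule
 * reference marks the table with PFR_TFLAG_REFERENCED.
 */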
2078 struct pfr_ktable *
2079 pfr_attach_table(struct pf_ruleset *rs, char *name)
2080 {
2081 	struct pfr_ktable	*kt, *rt;
2082 	struct pfr_table	 tbl;
2083 	struct pf_anchor	*ac = rs->anchor;
2084 
2085 	bzero(&tbl, sizeof(tbl));
2086 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2087 	if (ac != NULL)
2088 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2089 	kt = pfr_lookup_table(&tbl);
2090 	if (kt == NULL) {
2091 		kt = pfr_create_ktable(&tbl, time_second, 1);
2092 		if (kt == NULL)
2093 			return (NULL);
2094 		if (ac != NULL) {
2095 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2096 			rt = pfr_lookup_table(&tbl);
2097 			if (rt == NULL) {
2098 				rt = pfr_create_ktable(&tbl, 0, 1);
2099 				if (rt == NULL) {
2100 					pfr_destroy_ktable(kt, 0);
2101 					return (NULL);
2102 				}
2103 				pfr_insert_ktable(rt);
2104 			}
2105 			kt->pfrkt_root = rt;
2106 		}
2107 		pfr_insert_ktable(kt);
2108 	}
2109 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2110 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2111 	return (kt);
2112 }
2113 
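/*
 * Drop a rule reference on the table; the last reference clears
 * PFR_TFLAG_REFERENCED.  An already zero or negative refcount is
 * reported on the console.
 */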
2114 void
2115 pfr_detach_table(struct pfr_ktable *kt)
2116 {
2117 	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2118 		kprintf("pfr_detach_table: refcount = %d.\n",
2119 		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2120 	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2121 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2122 }
2123 
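/*
 * Pick an address from the table for a round-robin style pool.  The
 * caller's index and counter select the current block and position
 * within it; nested (more specific) blocks are skipped by advancing
 * the counter past them.  Returns 0 on success, 1 when the table is
 * exhausted and -1 when the table is not active.
 */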
2124 int
2125 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2126     struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2127 {
2128 	struct pfr_kentry	*ke, *ke2 = NULL;
2129 	struct pf_addr		*addr = NULL;
2130 	union sockaddr_union	 mask;
2131 	int			 idx = -1, use_counter = 0;
2132 	struct sockaddr_in	 pfr_sin;
2133 	struct sockaddr_in6	 pfr_sin6;
2134 	union sockaddr_union	 pfr_mask;
2135 
	/* initialize the rn_match() search keys (len/family) before use */
	bzero(&pfr_sin, sizeof(pfr_sin));
	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	bzero(&pfr_sin6, sizeof(pfr_sin6));
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;
2136 	if (af == AF_INET)
2137 		addr = (struct pf_addr *)&pfr_sin.sin_addr;
2138 	else if (af == AF_INET6)
2139 		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2140 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2141 		kt = kt->pfrkt_root;
2142 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2143 		return (-1);
2144 
2145 	if (pidx != NULL)
2146 		idx = *pidx;
2147 	if (counter != NULL && idx >= 0)
2148 		use_counter = 1;
2149 	if (idx < 0)
2150 		idx = 0;
2151 
2152 _next_block:
2153 	ke = pfr_kentry_byidx(kt, idx, af);
2154 	if (ke == NULL) {
2155 		kt->pfrkt_nomatch++;
2156 		return (1);
2157 	}
2158 	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2159 	*raddr = SUNION2PF(&ke->pfrke_sa, af);
2160 	*rmask = SUNION2PF(&pfr_mask, af);
2161 
2162 	if (use_counter) {
2163 		/* is supplied address within block? */
2164 		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2165 			/* no, go to next block in table */
2166 			idx++;
2167 			use_counter = 0;
2168 			goto _next_block;
2169 		}
2170 		PF_ACPY(addr, counter, af);
2171 	} else {
2172 		/* use first address of block */
2173 		PF_ACPY(addr, *raddr, af);
2174 	}
2175 
2176 	if (!KENTRY_NETWORK(ke)) {
2177 		/* this is a single IP address - no possible nested block */
2178 		PF_ACPY(counter, addr, af);
2179 		*pidx = idx;
2180 		kt->pfrkt_match++;
2181 		return (0);
2182 	}
2183 	for (;;) {
2184 		/* we don't want to use a nested block */
2185 		if (af == AF_INET)
2186 			ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
2187 			    kt->pfrkt_ip4);
2188 		else if (af == AF_INET6)
2189 			ke2 = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
2190 			    kt->pfrkt_ip6);
2191 		/* no need to check KENTRY_RNF_ROOT() here */
2192 		if (ke2 == ke) {
2193 			/* lookup returned the same block - perfect */
2194 			PF_ACPY(counter, addr, af);
2195 			*pidx = idx;
2196 			kt->pfrkt_match++;
2197 			return (0);
2198 		}
2199 
2200 		/* we need to increase the counter past the nested block */
2201 		pfr_prepare_network(&mask, af, ke2->pfrke_net);
2202 		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2203 		PF_AINC(addr, af);
2204 		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2205 			/* ok, we reached the end of our main block */
2206 			/* go to next block in table */
2207 			idx++;
2208 			use_counter = 0;
2209 			goto _next_block;
2210 		}
2211 	}
2212 }
2213 
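/*
 * Return the idx'th entry of the table for the given address family,
 * located by walking the corresponding radix tree with the
 * PFRW_POOL_GET operation.
 */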
2214 struct pfr_kentry *
2215 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2216 {
2217 	struct pfr_walktree	w;
2218 
2219 	bzero(&w, sizeof(w));
2220 	w.pfrw_op = PFRW_POOL_GET;
2221 	w.pfrw_cnt = idx;
2222 
2223 	switch (af) {
2224 #ifdef INET
2225 	case AF_INET:
2226 		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2227 		return (w.pfrw_kentry);
2228 #endif /* INET */
2229 #ifdef INET6
2230 	case AF_INET6:
2231 		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2232 		return (w.pfrw_kentry);
2233 #endif /* INET6 */
2234 	default:
2235 		return (NULL);
2236 	}
2237 }
2238 
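/*
 * Refresh a dynamic address backed by this table: reset its address
 * counts and rewalk the IPv4 and/or IPv6 radix trees (restricted to
 * the dynaddr's address family) with the PFRW_DYNADDR_UPDATE
 * operation.
 */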
2239 void
2240 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2241 {
2242 	struct pfr_walktree	w;
2243 
2244 	bzero(&w, sizeof(w));
2245 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2246 	w.pfrw_dyn = dyn;
2247 
2248 	crit_enter();
2249 	dyn->pfid_acnt4 = 0;
2250 	dyn->pfid_acnt6 = 0;
2251 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2252 		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2253 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2254 		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2255 	crit_exit();
2256 }
2257