/*	$NetBSD: pf_table.c,v 1.15 2009/07/28 18:15:26 minskim Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.70 2007/05/23 11:53:45 markus Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_table.c,v 1.15 2009/07/28 18:15:26 minskim Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#ifndef __NetBSD__
#include <netinet/ip_ipsp.h>
#endif /* !__NetBSD__ */
#include <net/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
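
/*
 * COPYIN/COPYOUT dispatch on PFR_FLAG_USERIOCTL: buffers handed in by a
 * user ioctl are userland pointers and must go through copyin()/copyout(),
 * which can fail with EFAULT; buffers passed by pf itself already live in
 * kernel space, so a plain bcopy() (which cannot fail, hence the ", 0")
 * is used instead.  A typical caller therefore looks like:
 *
 *	struct pfr_addr ad;
 *
 *	if (COPYIN(addr + i, &ad, sizeof(ad), flags))
 *		senderr(EFAULT);
 */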

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kentry_pl2;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
#ifdef __NetBSD__
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_nointr, IPL_NONE);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_nointr, IPL_NONE);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL, IPL_SOFTNET);
#else
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", &pool_allocator_oldnointr);
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);
#endif /* !__NetBSD__ */

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
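	/*
	 * Worked example of the heuristic above: for a table with
	 * N = 4096 entries the loop leaves log = 14 (one initial
	 * increment plus one per bit of N), so a request deleting
	 * more than 4096/14 = 292 addresses takes the O(N) full
	 * mark/sweep scan, while smaller requests do one radix
	 * lookup per address instead.
	 */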
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0, xdel = 0,
				 xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && size2 != NULL && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s = 0 /* XXX gcc */;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s = 0, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((char *)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((char *)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
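
/*
 * For example, pfra_af = AF_INET, pfra_net = 24, pfra_ip4addr = 10.0.0.0
 * validates, while 10.0.0.1/24 fails the host-bit check above (a bit is
 * set below the 24-bit prefix) and 10.0.0.0/33 fails the prefix-length
 * check; all rejected addresses yield -1, which the callers map to EINVAL.
 */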

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}


struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = (void *)0xdeadb;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
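
/*
 * Note the effect of 'exact' on host lookups: rn_match() returns the
 * best-matching entry, so looking up the host 10.1.2.3 in a table that
 * only holds 10.1.2.0/24 yields the network entry; with 'exact' set,
 * KENTRY_NETWORK() discards it and the lookup reports no match.
 */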

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
	else
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}
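
/*
 * Two pools back this allocator: pfr_kentry_pl2 serves callers running
 * at softnet (interrupt) priority, pfr_kentry_pl serves ordinary
 * ioctl-driven callers; pfrke_intrpool records the origin so that
 * pfr_destroy_kentry() returns the entry to the pool it came from.
 */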

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}

void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
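
/*
 * Examples of the masks built above (words in network byte order):
 *
 *	pfr_prepare_network(&su, AF_INET, 20);
 *		su.sin.sin_addr.s_addr == htonl(0xFFFFF000)
 *	pfr_prepare_network(&su, AF_INET6, 48);
 *		s6_addr32[] == { 0xFFFFFFFF, htonl(0xFFFF0000), 0, 0 }
 */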

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = (void *)0xdeadb;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s = 0 /* XXX gcc */, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s = 0 /* XXX gcc */, n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s = 0 /* XXX gcc */, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s = 0, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s = 0 /* XXX gcc */, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
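
/*
 * The last branch above relies on the active table being empty: swapping
 * the radix heads and the counter with the shadow publishes the whole
 * inactive address set in O(1), and the pfr_destroy_ktable(shadow, 0)
 * at the end then tears down what used to be kt's (empty) trees.
 */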

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
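
/*
 * For example, an anchor of "///foo/bar" becomes "foo/bar": the leading
 * slashes are stripped, the string is shifted left and the freed tail of
 * the MAXPATHLEN buffer is zero-filled.  An anchor whose last byte is
 * non-NUL, or with garbage after its terminating NUL, is rejected with -1.
 */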
1789 
1790 int
1791 pfr_table_count(struct pfr_table *filter, int flags)
1792 {
1793 	struct pf_ruleset *rs;
1794 
1795 	if (flags & PFR_FLAG_ALLRSETS)
1796 		return (pfr_ktable_cnt);
1797 	if (filter->pfrt_anchor[0]) {
1798 		rs = pf_find_ruleset(filter->pfrt_anchor);
1799 		return ((rs != NULL) ? rs->tables : -1);
1800 	}
1801 	return (pf_main_ruleset.tables);
1802 }
1803 
1804 int
1805 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1806 {
1807 	if (flags & PFR_FLAG_ALLRSETS)
1808 		return (0);
1809 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1810 		return (1);
1811 	return (0);
1812 }
1813 
1814 void
1815 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1816 {
1817 	struct pfr_ktable	*p;
1818 
1819 	SLIST_FOREACH(p, workq, pfrkt_workq)
1820 		pfr_insert_ktable(p);
1821 }
1822 
1823 void
1824 pfr_insert_ktable(struct pfr_ktable *kt)
1825 {
1826 	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1827 	pfr_ktable_cnt++;
1828 	if (kt->pfrkt_root != NULL)
1829 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1830 			pfr_setflags_ktable(kt->pfrkt_root,
1831 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1832 }
1833 
1834 void
1835 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1836 {
1837 	struct pfr_ktable	*p, *q;
1838 
1839 	for (p = SLIST_FIRST(workq); p; p = q) {
1840 		q = SLIST_NEXT(p, pfrkt_workq);
1841 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1842 	}
1843 }
1844 
1845 void
1846 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1847 {
1848 	struct pfr_kentryworkq	addrq;
1849 
1850 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1851 	    !(newf & PFR_TFLAG_PERSIST))
1852 		newf &= ~PFR_TFLAG_ACTIVE;
1853 	if (!(newf & PFR_TFLAG_ACTIVE))
1854 		newf &= ~PFR_TFLAG_USRMASK;
1855 	if (!(newf & PFR_TFLAG_SETMASK)) {
1856 		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1857 		if (kt->pfrkt_root != NULL)
1858 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1859 				pfr_setflags_ktable(kt->pfrkt_root,
1860 				    kt->pfrkt_root->pfrkt_flags &
1861 					~PFR_TFLAG_REFDANCHOR);
1862 		pfr_destroy_ktable(kt, 1);
1863 		pfr_ktable_cnt--;
1864 		return;
1865 	}
1866 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1867 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1868 		pfr_remove_kentries(kt, &addrq);
1869 	}
1870 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1871 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1872 		kt->pfrkt_shadow = NULL;
1873 	}
1874 	kt->pfrkt_flags = newf;
1875 }
1876 
1877 void
1878 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1879 {
1880 	struct pfr_ktable	*p;
1881 
1882 	SLIST_FOREACH(p, workq, pfrkt_workq)
1883 		pfr_clstats_ktable(p, tzero, recurse);
1884 }
1885 
1886 void
1887 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1888 {
1889 	struct pfr_kentryworkq	 addrq;
1890 	int			 s;
1891 
1892 	if (recurse) {
1893 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1894 		pfr_clstats_kentries(&addrq, tzero, 0);
1895 	}
1896 	s = splsoftnet();
1897 	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1898 	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1899 	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1900 	splx(s);
1901 	kt->pfrkt_tzero = tzero;
1902 }
1903 
1904 struct pfr_ktable *
1905 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1906 {
1907 	struct pfr_ktable	*kt;
1908 	struct pf_ruleset	*rs;
1909 	void			*h4 = NULL, *h6 = NULL;
1910 
1911 	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
1912 	if (kt == NULL)
1913 		return (NULL);
1914 	bzero(kt, sizeof(*kt));
1915 	kt->pfrkt_t = *tbl;
1916 
1917 	if (attachruleset) {
1918 		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1919 		if (!rs) {
1920 			pfr_destroy_ktable(kt, 0);
1921 			return (NULL);
1922 		}
1923 		kt->pfrkt_rs = rs;
1924 		rs->tables++;
1925 	}
1926 
1927 	if (!rn_inithead(&h4, offsetof(struct sockaddr_in, sin_addr) * 8))
1928 		goto out;
1929 
1930 	if (!rn_inithead(&h6, offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1931 		Free(h4);
1932 		goto out;
1933 	}
1934 	kt->pfrkt_ip4 = h4;
1935 	kt->pfrkt_ip6 = h6;
1936 	kt->pfrkt_tzero = tzero;
1937 
1938 	return (kt);
1939 out:
1940 	pfr_destroy_ktable(kt, 0);
1941 	return (NULL);
1942 }
1943 
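/*
 * Note on the rn_inithead() calls above: the second argument is the
 * bit offset at which radix keys start inside the sockaddr, i.e.
 * (assuming the usual sockaddr layouts)
 *
 *	offsetof(struct sockaddr_in, sin_addr) * 8    == 32 bits
 *	offsetof(struct sockaddr_in6, sin6_addr) * 8  == 64 bits
 *
 * so each table carries one radix head per address family.
 */
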
1944 void
1945 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1946 {
1947 	struct pfr_ktable	*p, *q;
1948 
1949 	for (p = SLIST_FIRST(workq); p; p = q) {
1950 		q = SLIST_NEXT(p, pfrkt_workq);
1951 		pfr_destroy_ktable(p, flushaddr);
1952 	}
1953 }
1954 
1955 void
1956 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1957 {
1958 	struct pfr_kentryworkq	 addrq;
1959 
1960 	if (flushaddr) {
1961 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1962 		pfr_clean_node_mask(kt, &addrq);
1963 		pfr_destroy_kentries(&addrq);
1964 	}
1965 	if (kt->pfrkt_ip4 != NULL)
1966 		free((void *)kt->pfrkt_ip4, M_RTABLE);
1967 	if (kt->pfrkt_ip6 != NULL)
1968 		free((void *)kt->pfrkt_ip6, M_RTABLE);
1969 	if (kt->pfrkt_shadow != NULL)
1970 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1971 	if (kt->pfrkt_rs != NULL) {
1972 		kt->pfrkt_rs->tables--;
1973 		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1974 	}
1975 	pool_put(&pfr_ktable_pl, kt);
1976 }
1977 
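/*
 * Teardown order in pfr_destroy_ktable() above: entries are flushed
 * (with their radix node masks cleaned) before the two radix heads are
 * freed, any shadow table from an uncommitted transaction is destroyed
 * recursively, and the ruleset reference is dropped last so that a now
 * empty ruleset can be garbage-collected.
 */
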
1978 int
1979 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1980 {
1981 	int d;
1982 
1983 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1984 		return (d);
1985 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1986 }
1987 
1988 struct pfr_ktable *
1989 pfr_lookup_table(struct pfr_table *tbl)
1990 {
1991 	/* struct pfr_ktable starts with a struct pfr_table, so the cast is safe */
1992 	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1993 	    (struct pfr_ktable *)tbl));
1994 }
1995 
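/*
 * The cast in pfr_lookup_table() above works because struct pfr_ktable
 * embeds its struct pfr_table as the very first member, so the RB
 * comparison only ever touches fields both structures share.  A
 * hypothetical in-kernel caller (illustrative only):
 *
 *	struct pfr_table key;
 *
 *	bzero(&key, sizeof(key));
 *	strlcpy(key.pfrt_name, "spammers", sizeof(key.pfrt_name));
 *	kt = pfr_lookup_table(&key);        NULL if no such table
 */
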
1996 int
1997 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1998 {
1999 	struct pfr_kentry	*ke = NULL;
2000 	int			 match;
2001 
2002 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2003 		kt = kt->pfrkt_root;
2004 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2005 		return (0);
2006 
2007 	switch (af) {
2008 #ifdef INET
2009 	case AF_INET:
2010 		pfr_sin.sin_addr.s_addr = a->addr32[0];
2011 		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2012 		if (ke && KENTRY_RNF_ROOT(ke))
2013 			ke = NULL;
2014 		break;
2015 #endif /* INET */
2016 #ifdef INET6
2017 	case AF_INET6:
2018 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2019 		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2020 		if (ke && KENTRY_RNF_ROOT(ke))
2021 			ke = NULL;
2022 		break;
2023 #endif /* INET6 */
2024 	}
2025 	match = (ke && !ke->pfrke_not);
2026 	if (match)
2027 		kt->pfrkt_match++;
2028 	else
2029 		kt->pfrkt_nomatch++;
2030 	return (match);
2031 }
2032 
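/*
 * Notes on pfr_match_addr() above: the lookup goes through rn_match()
 * on the static scratch sockaddrs pfr_sin/pfr_sin6, which is only safe
 * because these paths never run concurrently (pf operates at
 * splsoftnet).  An entry with pfrke_not set is a negated ("!") address:
 * it shadows any broader matching network yet still yields 0.
 */
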
2033 void
2034 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2035     u_int64_t len, int dir_out, int op_pass, int notrule)
2036 {
2037 	struct pfr_kentry	*ke = NULL;
2038 
2039 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2040 		kt = kt->pfrkt_root;
2041 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2042 		return;
2043 
2044 	switch (af) {
2045 #ifdef INET
2046 	case AF_INET:
2047 		pfr_sin.sin_addr.s_addr = a->addr32[0];
2048 		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2049 		if (ke && KENTRY_RNF_ROOT(ke))
2050 			ke = NULL;
2051 		break;
2052 #endif /* INET */
2053 #ifdef INET6
2054 	case AF_INET6:
2055 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2056 		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2057 		if (ke && KENTRY_RNF_ROOT(ke))
2058 			ke = NULL;
2059 		break;
2060 #endif /* INET6 */
2061 	default:
2062 		;
2063 	}
2064 	if ((ke == NULL || ke->pfrke_not) != notrule) {
2065 		if (op_pass != PFR_OP_PASS)
2066 			printf("pfr_update_stats: assertion failed.\n");
2067 		op_pass = PFR_OP_XPASS;
2068 	}
2069 	kt->pfrkt_packets[dir_out][op_pass]++;
2070 	kt->pfrkt_bytes[dir_out][op_pass] += len;
2071 	if (ke != NULL && op_pass != PFR_OP_XPASS) {
2072 		ke->pfrke_packets[dir_out][op_pass]++;
2073 		ke->pfrke_bytes[dir_out][op_pass] += len;
2074 	}
2075 }
2076 
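/*
 * Accounting fixup in pfr_update_stats() above: if the lookup outcome
 * no longer agrees with what rule evaluation saw (notrule), the packet
 * is re-booked as PFR_OP_XPASS on the table-level counters only and the
 * per-entry counters are skipped, keeping entry statistics consistent
 * with what actually matched.
 */
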
2077 struct pfr_ktable *
2078 pfr_attach_table(struct pf_ruleset *rs, char *name)
2079 {
2080 	struct pfr_ktable	*kt, *rt;
2081 	struct pfr_table	 tbl;
2082 	struct pf_anchor	*ac = rs->anchor;
2083 
2084 	bzero(&tbl, sizeof(tbl));
2085 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2086 	if (ac != NULL)
2087 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2088 	kt = pfr_lookup_table(&tbl);
2089 	if (kt == NULL) {
2090 		kt = pfr_create_ktable(&tbl, time_second, 1);
2091 		if (kt == NULL)
2092 			return (NULL);
2093 		if (ac != NULL) {
2094 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2095 			rt = pfr_lookup_table(&tbl);
2096 			if (rt == NULL) {
2097 				rt = pfr_create_ktable(&tbl, 0, 1);
2098 				if (rt == NULL) {
2099 					pfr_destroy_ktable(kt, 0);
2100 					return (NULL);
2101 				}
2102 				pfr_insert_ktable(rt);
2103 			}
2104 			kt->pfrkt_root = rt;
2105 		}
2106 		pfr_insert_ktable(kt);
2107 	}
2108 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2109 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2110 	return (kt);
2111 }
2112 
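/*
 * Note on pfr_attach_table() above: a table referenced from inside an
 * anchor also gets a "root" table of the same name created in the main
 * ruleset (the anchor field is zeroed for the second lookup) and linked
 * through pfrkt_root.  pfr_match_addr(), pfr_update_stats() and
 * pfr_pool_get() fall back to that root table whenever the anchor-local
 * table is not ACTIVE.
 */
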
2113 void
2114 pfr_detach_table(struct pfr_ktable *kt)
2115 {
2116 	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2117 		printf("pfr_detach_table: refcount = %d.\n",
2118 		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2119 	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2120 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2121 }
2122 
2123 int
2124 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2125     struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2126 {
2127 	struct pfr_kentry	*ke, *ke2 = (void *)0xdeadb;
2128 	struct pf_addr		*addr = (void *)0xdeadb;
2129 	union sockaddr_union	 mask;
2130 	int			 idx = -1, use_counter = 0;
2131 
2132 	if (af == AF_INET)
2133 		addr = (struct pf_addr *)&pfr_sin.sin_addr;
2134 	else if (af == AF_INET6)
2135 		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2136 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2137 		kt = kt->pfrkt_root;
2138 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2139 		return (-1);
2140 
2141 	if (pidx != NULL)
2142 		idx = *pidx;
2143 	if (counter != NULL && idx >= 0)
2144 		use_counter = 1;
2145 	if (idx < 0)
2146 		idx = 0;
2147 
2148 _next_block:
2149 	ke = pfr_kentry_byidx(kt, idx, af);
2150 	if (ke == NULL)
2151 		return (1);
2152 	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2153 	*raddr = SUNION2PF(&ke->pfrke_sa, af);
2154 	*rmask = SUNION2PF(&pfr_mask, af);
2155 
2156 	if (use_counter) {
2157 		/* is supplied address within block? */
2158 		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2159 			/* no, go to next block in table */
2160 			idx++;
2161 			use_counter = 0;
2162 			goto _next_block;
2163 		}
2164 		PF_ACPY(addr, counter, af);
2165 	} else {
2166 		/* use first address of block */
2167 		PF_ACPY(addr, *raddr, af);
2168 	}
2169 
2170 	if (!KENTRY_NETWORK(ke)) {
2171 		/* this is a single IP address - no possible nested block */
2172 		PF_ACPY(counter, addr, af);
2173 		*pidx = idx;
2174 		return (0);
2175 	}
2176 	for (;;) {
2177 		/* we don't want to use a nested block */
2178 		if (af == AF_INET)
2179 			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2180 			    kt->pfrkt_ip4);
2181 		else if (af == AF_INET6)
2182 			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2183 			    kt->pfrkt_ip6);
2184 		/* no need to check KENTRY_RNF_ROOT() here */
2185 		if (ke2 == ke) {
2186 			/* lookup returned the same block - perfect */
2187 			PF_ACPY(counter, addr, af);
2188 			*pidx = idx;
2189 			return (0);
2190 		}
2191 
2192 		/* we need to increase the counter past the nested block */
2193 		pfr_prepare_network(&mask, af, ke2->pfrke_net);
2194 		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2195 		PF_AINC(addr, af);
2196 		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2197 			/* ok, we reached the end of our main block */
2198 			/* go to next block in table */
2199 			idx++;
2200 			use_counter = 0;
2201 			goto _next_block;
2202 		}
2203 	}
2204 }
2205 
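/*
 * Sketch of pfr_pool_get() above (illustrative addresses): *pidx walks
 * the table's networks in tree order and counter remembers the last
 * address handed out inside the current block; nested (more specific)
 * blocks are skipped by filling their host bits via PF_POOLMASK() and
 * incrementing, which is why the mask must be prepared in the pool's
 * own address family.  For a table { 10.0.0.0/30, 10.0.0.2/31 }:
 *
 *	counter 10.0.0.1, idx 0: rn_match() returns the /30 itself,
 *	    so 10.0.0.1 is handed out directly
 *	counter 10.0.0.2, idx 0: rn_match() finds the nested /31;
 *	    host bits filled -> 10.0.0.3, incremented -> 10.0.0.4,
 *	    which leaves the /30, so idx advances to the next block
 */
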
2206 struct pfr_kentry *
2207 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2208 {
2209 	struct pfr_walktree	w;
2210 
2211 	bzero(&w, sizeof(w));
2212 	w.pfrw_op = PFRW_POOL_GET;
2213 	w.pfrw_cnt = idx;
2214 
2215 	switch (af) {
2216 #ifdef INET
2217 	case AF_INET:
2218 		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2219 		return (w.pfrw_kentry);
2220 #endif /* INET */
2221 #ifdef INET6
2222 	case AF_INET6:
2223 		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2224 		return (w.pfrw_kentry);
2225 #endif /* INET6 */
2226 	default:
2227 		return (NULL);
2228 	}
2229 }
2230 
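/*
 * Note: pfr_kentry_byidx() above locates the idx'th entry by walking
 * the entire radix tree with a PFRW_POOL_GET walker, so each selection
 * is O(n) in the table size; acceptable for the modest tables typically
 * used as round-robin pools.
 */
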
2231 void
2232 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2233 {
2234 	struct pfr_walktree	w;
2235 	int			s;
2236 
2237 	bzero(&w, sizeof(w));
2238 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2239 	w.pfrw_dyn = dyn;
2240 
2241 	s = splsoftnet();
2242 	dyn->pfid_acnt4 = 0;
2243 	dyn->pfid_acnt6 = 0;
2244 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2245 		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2246 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2247 		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2248 	splx(s);
2249 }
2250
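/*
 * Note: pfr_dynaddr_update() above refreshes a dynamic address (an
 * interface-backed pfi_dynaddr) from a table: under splsoftnet it
 * resets the per-family address counts and lets the
 * PFRW_DYNADDR_UPDATE walker repopulate the cached address/mask and
 * pfid_acnt4/pfid_acnt6 from the matching radix entries.
 */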