/*	$OpenBSD: pf_table.c,v 1.80 2008/11/24 13:22:09 mikeb Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip_ipsp.h>
#include <net/pfvar.h>

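/*
 * Helper macros for the table ioctls: ACCEPT_FLAGS rejects requests
 * carrying flags outside the per-ioctl "oklist", and COPYIN/COPYOUT
 * use copyin()/copyout() for userland callers (PFR_FLAG_USERIOCTL)
 * but a plain bcopy() when invoked from within the kernel.
 */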
#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct pool		 pfr_kcounters_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
	pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters), 0, 0, 0,
	    "pfrkcounters", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
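	/*
	 * tmpkt is a scratch table: routing each new entry into it first
	 * catches duplicates within the request itself before anything
	 * is committed to the real table.
	 */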
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here.
	 * With:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
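	/*
	 * Illustrative figures: with pfrkt_cnt == 65536 the loop below
	 * leaves log == 18, so the full-scan path is taken only when
	 * more than 65536/18 ~= 3640 deletions are requested.
	 */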
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
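	/*
	 * Replace semantics: clear the mark on every existing entry,
	 * mark (or add) each address in the request, then sweep and
	 * delete whatever was left unmarked.
	 */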
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
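	/*
	 * Two-call protocol: if the caller's buffer is too small, report
	 * the required size and return success without copying anything.
	 */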
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
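	/*
	 * pfra_u is the first member of struct pfr_addr, so the byte
	 * casts below address the raw address bytes: reject host bits
	 * set past the prefix and any garbage after the address.
	 */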
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl, PR_NOWAIT | PR_ZERO);
	else
		ke = pool_get(&pfr_kentry_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			pool_put(&pfr_kcounters_pl, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}

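/*
 * Build a mask of "net" bits in sa. Illustrative values: AF_INET with
 * net == 24 yields 255.255.255.0; AF_INET6 with net == 70 yields
 * ffff:ffff:ffff:ffff:fc00::.
 */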
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
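		/*
		 * Count the addresses per af, but publish only the
		 * first one: a dynaddr expands to at most one
		 * address/mask pair per family.
		 */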
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

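			/*
			 * A table defined inside an anchor keeps a
			 * pfrkt_root link to the same-named table at
			 * the main level, which carries the
			 * PFR_REFCNT_ANCHOR reference count.
			 */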
			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

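/*
 * pfr_ina_begin/define/rollback/commit implement table loading as a
 * transaction: "begin" opens the ruleset and hands out a ticket,
 * "define" builds shadow tables, and "commit" swaps them in.
 */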
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0, !(flags & PFR_FLAG_USERIOCTL));
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

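	/*
	 * NO_ADDRESSES means the shadow was defined without
	 * PFR_FLAG_ADDRSTOO: only table flags change, the address
	 * lists are left alone.
	 */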
	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove leading slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

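/*
 * Apply a new flag word to a table. Clearing PFR_TFLAG_SETMASK
 * destroys the table outright; losing PFR_TFLAG_ACTIVE flushes its
 * addresses; losing PFR_TFLAG_INACTIVE discards a pending shadow.
 */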
1837 void
1838 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1839 {
1840 	struct pfr_kentryworkq	addrq;
1841 
1842 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1843 	    !(newf & PFR_TFLAG_PERSIST))
1844 		newf &= ~PFR_TFLAG_ACTIVE;
1845 	if (!(newf & PFR_TFLAG_ACTIVE))
1846 		newf &= ~PFR_TFLAG_USRMASK;
1847 	if (!(newf & PFR_TFLAG_SETMASK)) {
1848 		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1849 		if (kt->pfrkt_root != NULL)
1850 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1851 				pfr_setflags_ktable(kt->pfrkt_root,
1852 				    kt->pfrkt_root->pfrkt_flags &
1853 					~PFR_TFLAG_REFDANCHOR);
1854 		pfr_destroy_ktable(kt, 1);
1855 		pfr_ktable_cnt--;
1856 		return;
1857 	}
1858 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1859 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1860 		pfr_remove_kentries(kt, &addrq);
1861 	}
1862 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1863 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1864 		kt->pfrkt_shadow = NULL;
1865 	}
1866 	kt->pfrkt_flags = newf;
1867 }
1868 
1869 void
1870 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1871 {
1872 	struct pfr_ktable	*p;
1873 
1874 	SLIST_FOREACH(p, workq, pfrkt_workq)
1875 		pfr_clstats_ktable(p, tzero, recurse);
1876 }
1877 
1878 void
1879 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1880 {
1881 	struct pfr_kentryworkq	 addrq;
1882 	int			 s;
1883 
1884 	if (recurse) {
1885 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1886 		pfr_clstats_kentries(&addrq, tzero, 0);
1887 	}
1888 	s = splsoftnet();
1889 	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1890 	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1891 	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1892 	splx(s);
1893 	kt->pfrkt_tzero = tzero;
1894 }
1895 
1896 struct pfr_ktable *
1897 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset,
1898     int intr)
1899 {
1900 	struct pfr_ktable	*kt;
1901 	struct pf_ruleset	*rs;
1902 
1903 	if (intr)
1904 		kt = pool_get(&pfr_ktable_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL);
1905 	else
1906 		kt = pool_get(&pfr_ktable_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
1907 	if (kt == NULL)
1908 		return (NULL);
1909 	kt->pfrkt_t = *tbl;
1910 
1911 	if (attachruleset) {
1912 		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1913 		if (rs == NULL) {
1914 			pfr_destroy_ktable(kt, 0);
1915 			return (NULL);
1916 		}
1917 		kt->pfrkt_rs = rs;
1918 		rs->tables++;
1919 	}
1920 
1921 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
1922 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
1923 	    !rn_inithead((void **)&kt->pfrkt_ip6,
1924 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1925 		pfr_destroy_ktable(kt, 0);
1926 		return (NULL);
1927 	}
1928 	kt->pfrkt_tzero = tzero;
1929 
1930 	return (kt);
1931 }
1932 
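/*
 * As in pfr_setflags_ktables(), keep an explicit successor pointer
 * because the current element is freed by pfr_destroy_ktable().
 */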
1933 void
1934 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1935 {
1936 	struct pfr_ktable	*p, *q;
1937 
1938 	for (p = SLIST_FIRST(workq); p; p = q) {
1939 		q = SLIST_NEXT(p, pfrkt_workq);
1940 		pfr_destroy_ktable(p, flushaddr);
1941 	}
1942 }
1943 
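/*
 * pfr_destroy_ktable: optionally flush all entries, free both radix
 * heads, recursively destroy a shadow table, and drop the ruleset
 * reference before returning kt to its pool.
 */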
1944 void
1945 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1946 {
1947 	struct pfr_kentryworkq	 addrq;
1948 
1949 	if (flushaddr) {
1950 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1951 		pfr_clean_node_mask(kt, &addrq);
1952 		pfr_destroy_kentries(&addrq);
1953 	}
1954 	if (kt->pfrkt_ip4 != NULL)
1955 		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1956 	if (kt->pfrkt_ip6 != NULL)
1957 		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1958 	if (kt->pfrkt_shadow != NULL)
1959 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1960 	if (kt->pfrkt_rs != NULL) {
1961 		kt->pfrkt_rs->tables--;
1962 		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1963 	}
1964 	pool_put(&pfr_ktable_pl, kt);
1965 }
1966 
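/*
 * pfr_ktable_compare: tree ordering for pfr_ktables, by table name
 * first and anchor path second.
 */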
1967 int
1968 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1969 {
1970 	int d;
1971 
1972 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1973 		return (d);
1974 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1975 }
1976 
1977 struct pfr_ktable *
1978 pfr_lookup_table(struct pfr_table *tbl)
1979 {
1980 	/* struct pfr_ktable starts like a struct pfr_table */
1981 	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
1982 	    (struct pfr_ktable *)tbl));
1983 }
1984 
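/*
 * pfr_match_addr: radix lookup of an address in the table (falling back
 * to the root table when kt itself is inactive), updating the
 * match/nomatch counters.  Negated entries count as non-matches.
 */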
1985 int
1986 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1987 {
1988 	struct pfr_kentry	*ke = NULL;
1989 	int			 match;
1990 
1991 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1992 		kt = kt->pfrkt_root;
1993 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1994 		return (0);
1995 
1996 	switch (af) {
1997 #ifdef INET
1998 	case AF_INET:
1999 		pfr_sin.sin_addr.s_addr = a->addr32[0];
2000 		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2001 		if (ke && KENTRY_RNF_ROOT(ke))
2002 			ke = NULL;
2003 		break;
2004 #endif /* INET */
2005 #ifdef INET6
2006 	case AF_INET6:
2007 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2008 		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2009 		if (ke && KENTRY_RNF_ROOT(ke))
2010 			ke = NULL;
2011 		break;
2012 #endif /* INET6 */
2013 	}
2014 	match = (ke && !ke->pfrke_not);
2015 	if (match)
2016 		kt->pfrkt_match++;
2017 	else
2018 		kt->pfrkt_nomatch++;
2019 	return (match);
2020 }
2021 
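/*
 * pfr_update_stats: per-packet accounting.  The packet is charged to the
 * table's counters and, when PFR_TFLAG_COUNTERS is set, to the matching
 * entry's counters (allocated lazily).  If the lookup disagrees with what
 * the rule expected (notrule), the packet is accounted as PFR_OP_XPASS.
 */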
2022 void
2023 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2024     u_int64_t len, int dir_out, int op_pass, int notrule)
2025 {
2026 	struct pfr_kentry	*ke = NULL;
2027 
2028 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2029 		kt = kt->pfrkt_root;
2030 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2031 		return;
2032 
2033 	switch (af) {
2034 #ifdef INET
2035 	case AF_INET:
2036 		pfr_sin.sin_addr.s_addr = a->addr32[0];
2037 		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2038 		if (ke && KENTRY_RNF_ROOT(ke))
2039 			ke = NULL;
2040 		break;
2041 #endif /* INET */
2042 #ifdef INET6
2043 	case AF_INET6:
2044 		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2045 		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2046 		if (ke && KENTRY_RNF_ROOT(ke))
2047 			ke = NULL;
2048 		break;
2049 #endif /* INET6 */
2050 	default:
2051 		break;
2052 	}
2053 	if ((ke == NULL || ke->pfrke_not) != notrule) {
2054 		if (op_pass != PFR_OP_PASS)
2055 			printf("pfr_update_stats: assertion failed.\n");
2056 		op_pass = PFR_OP_XPASS;
2057 	}
2058 	kt->pfrkt_packets[dir_out][op_pass]++;
2059 	kt->pfrkt_bytes[dir_out][op_pass] += len;
2060 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
2061 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2062 		if (ke->pfrke_counters == NULL)
2063 			ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2064 			    PR_NOWAIT | PR_ZERO);
2065 		if (ke->pfrke_counters != NULL) {
2066 			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2067 			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
2068 		}
2069 	}
2070 }
2071 
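/*
 * pfr_attach_table: find or create the named table in rs and take a rule
 * reference on it; the first reference sets PFR_TFLAG_REFERENCED.  A
 * table inside an anchor is also linked to its main-ruleset counterpart
 * via pfrkt_root, creating that table on demand.
 */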
2072 struct pfr_ktable *
2073 pfr_attach_table(struct pf_ruleset *rs, char *name, int intr)
2074 {
2075 	struct pfr_ktable	*kt, *rt;
2076 	struct pfr_table	 tbl;
2077 	struct pf_anchor	*ac = rs->anchor;
2078 
2079 	bzero(&tbl, sizeof(tbl));
2080 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2081 	if (ac != NULL)
2082 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2083 	kt = pfr_lookup_table(&tbl);
2084 	if (kt == NULL) {
2085 		kt = pfr_create_ktable(&tbl, time_second, 1, intr);
2086 		if (kt == NULL)
2087 			return (NULL);
2088 		if (ac != NULL) {
2089 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2090 			rt = pfr_lookup_table(&tbl);
2091 			if (rt == NULL) {
2092 				rt = pfr_create_ktable(&tbl, 0, 1, intr);
2093 				if (rt == NULL) {
2094 					pfr_destroy_ktable(kt, 0);
2095 					return (NULL);
2096 				}
2097 				pfr_insert_ktable(rt);
2098 			}
2099 			kt->pfrkt_root = rt;
2100 		}
2101 		pfr_insert_ktable(kt);
2102 	}
2103 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2104 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2105 	return (kt);
2106 }
2107 
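/*
 * pfr_detach_table: drop a rule reference; on the last one, clearing
 * PFR_TFLAG_REFERENCED may cause pfr_setflags_ktable() to destroy the
 * table.
 */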
2108 void
2109 pfr_detach_table(struct pfr_ktable *kt)
2110 {
2111 	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2112 		printf("pfr_detach_table: refcount = %d.\n",
2113 		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2114 	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2115 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2116 }
2117 
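/*
 * pfr_pool_get: pick an address for the address-pool logic.  Locate the
 * idx-th block of the table, continue from *counter when it still falls
 * within that block (otherwise start at the block's first address), and
 * step over nested, more specific blocks so their addresses are never
 * handed out.  Returns 0 on success, 1 when no block is left at idx,
 * -1 on error.
 */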
2118 int
2119 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2120     struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2121 {
2122 	struct pfr_kentry	*ke, *ke2;
2123 	struct pf_addr		*addr;
2124 	union sockaddr_union	 mask;
2125 	int			 idx = -1, use_counter = 0;
2126 
2127 	if (af == AF_INET)
2128 		addr = (struct pf_addr *)&pfr_sin.sin_addr;
2129 	else if (af == AF_INET6)
2130 		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	else
		return (-1);	/* unsupported af: addr would be uninitialized */
2131 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2132 		kt = kt->pfrkt_root;
2133 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2134 		return (-1);
2135 
2136 	if (pidx != NULL)
2137 		idx = *pidx;
2138 	if (counter != NULL && idx >= 0)
2139 		use_counter = 1;
2140 	if (idx < 0)
2141 		idx = 0;
2142 
2143 _next_block:
2144 	ke = pfr_kentry_byidx(kt, idx, af);
2145 	if (ke == NULL) {
2146 		kt->pfrkt_nomatch++;
2147 		return (1);
2148 	}
2149 	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2150 	*raddr = SUNION2PF(&ke->pfrke_sa, af);
2151 	*rmask = SUNION2PF(&pfr_mask, af);
2152 
2153 	if (use_counter) {
2154 		/* is supplied address within block? */
2155 		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2156 			/* no, go to next block in table */
2157 			idx++;
2158 			use_counter = 0;
2159 			goto _next_block;
2160 		}
2161 		PF_ACPY(addr, counter, af);
2162 	} else {
2163 		/* use first address of block */
2164 		PF_ACPY(addr, *raddr, af);
2165 	}
2166 
2167 	if (!KENTRY_NETWORK(ke)) {
2168 		/* this is a single IP address - no possible nested block */
2169 		PF_ACPY(counter, addr, af);
2170 		*pidx = idx;
2171 		kt->pfrkt_match++;
2172 		return (0);
2173 	}
2174 	for (;;) {
2175 		/* we don't want to use a nested block */
2176 		if (af == AF_INET)
2177 			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2178 			    kt->pfrkt_ip4);
2179 		else if (af == AF_INET6)
2180 			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2181 			    kt->pfrkt_ip6);
2182 		/* no need to check KENTRY_RNF_ROOT() here */
2183 		if (ke2 == ke) {
2184 			/* lookup returned the same block - perfect */
2185 			PF_ACPY(counter, addr, af);
2186 			*pidx = idx;
2187 			kt->pfrkt_match++;
2188 			return (0);
2189 		}
2190 
2191 		/* we need to increase the counter past the nested block */
2192 		pfr_prepare_network(&mask, af, ke2->pfrke_net);
2193 		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2194 		PF_AINC(addr, af);
2195 		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2196 			/* ok, we reached the end of our main block */
2197 			/* go to next block in table */
2198 			idx++;
2199 			use_counter = 0;
2200 			goto _next_block;
2201 		}
2202 	}
2203 }
2204 
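/*
 * pfr_kentry_byidx: return the idx-th entry of the given address family,
 * found by walking the radix tree with a PFRW_POOL_GET walker.
 */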
2205 struct pfr_kentry *
2206 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2207 {
2208 	struct pfr_walktree	w;
2209 
2210 	bzero(&w, sizeof(w));
2211 	w.pfrw_op = PFRW_POOL_GET;
2212 	w.pfrw_cnt = idx;
2213 
2214 	switch (af) {
2215 #ifdef INET
2216 	case AF_INET:
2217 		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2218 		return (w.pfrw_kentry);
2219 #endif /* INET */
2220 #ifdef INET6
2221 	case AF_INET6:
2222 		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2223 		return (w.pfrw_kentry);
2224 #endif /* INET6 */
2225 	default:
2226 		return (NULL);
2227 	}
2228 }
2229 
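/*
 * pfr_dynaddr_update: re-evaluate a table-backed dynamic address.  The
 * per-af address counts are reset and repopulated by the
 * PFRW_DYNADDR_UPDATE walker at splsoftnet.
 */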
2230 void
2231 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2232 {
2233 	struct pfr_walktree	w;
2234 	int			s;
2235 
2236 	bzero(&w, sizeof(w));
2237 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2238 	w.pfrw_dyn = dyn;
2239 
2240 	s = splsoftnet();
2241 	dyn->pfid_acnt4 = 0;
2242 	dyn->pfid_acnt6 = 0;
2243 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2244 		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2245 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2246 		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2247 	splx(s);
2248 }
2249