xref: /openbsd-src/sys/net/pf_table.c (revision 850e275390052b330d93020bf619a739a3c277ac)
1 /*	$OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $	*/
2 
3 /*
4  * Copyright (c) 2002 Cedric Berger
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/socket.h>
36 #include <sys/mbuf.h>
37 #include <sys/kernel.h>
38 #include <sys/pool.h>
39 
40 #include <net/if.h>
41 #include <net/route.h>
42 #include <netinet/in.h>
43 #include <netinet/ip_ipsp.h>
44 #include <net/pfvar.h>
45 
46 #define ACCEPT_FLAGS(flags, oklist)		\
47 	do {					\
48 		if ((flags & ~(oklist)) &	\
49 		    PFR_FLAG_ALLMASK)		\
50 			return (EINVAL);	\
51 	} while (0)
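
/*
 * ACCEPT_FLAGS() makes each entry point reject caller flags outside
 * its own 'oklist' with EINVAL.  Only the PFR_FLAG_ALLMASK bits are
 * checked, so internal bits such as PFR_FLAG_USERIOCTL always pass.
 */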
52 
53 #define COPYIN(from, to, size, flags)		\
54 	((flags & PFR_FLAG_USERIOCTL) ?		\
55 	copyin((from), (to), (size)) :		\
56 	(bcopy((from), (to), (size)), 0))
57 
58 #define COPYOUT(from, to, size, flags)		\
59 	((flags & PFR_FLAG_USERIOCTL) ?		\
60 	copyout((from), (to), (size)) :		\
61 	(bcopy((from), (to), (size)), 0))
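
/*
 * COPYIN()/COPYOUT() let the same functions serve both the userland
 * ioctl path and in-kernel callers: with PFR_FLAG_USERIOCTL set the
 * buffers are user addresses and go through copyin(9)/copyout(9);
 * otherwise they are kernel addresses and a plain bcopy() suffices.
 */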
62 
63 #define	FILLIN_SIN(sin, addr)			\
64 	do {					\
65 		(sin).sin_len = sizeof(sin);	\
66 		(sin).sin_family = AF_INET;	\
67 		(sin).sin_addr = (addr);	\
68 	} while (0)
69 
70 #define	FILLIN_SIN6(sin6, addr)			\
71 	do {					\
72 		(sin6).sin6_len = sizeof(sin6);	\
73 		(sin6).sin6_family = AF_INET6;	\
74 		(sin6).sin6_addr = (addr);	\
75 	} while (0)
76 
77 #define SWAP(type, a1, a2)			\
78 	do {					\
79 		type tmp = a1;			\
80 		a1 = a2;			\
81 		a2 = tmp;			\
82 	} while (0)
83 
84 #define SUNION2PF(su, af) (((af)==AF_INET) ?	\
85     (struct pf_addr *)&(su)->sin.sin_addr :	\
86     (struct pf_addr *)&(su)->sin6.sin6_addr)
87 
88 #define	AF_BITS(af)		(((af)==AF_INET)?32:128)
89 #define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
90 #define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
91 #define KENTRY_RNF_ROOT(ke) \
92 		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
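
/*
 * An entry is a "network" when its prefix is shorter than the full
 * address width (32 bits for AF_INET, 128 for AF_INET6), e.g.
 * pfra_af = AF_INET with pfra_net = 24.  KENTRY_RNF_ROOT() filters
 * out the radix tree's internal root nodes, which rn_match() and
 * rn_lookup() may hand back.
 */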
93 
94 #define NO_ADDRESSES		(-1)
95 #define ENQUEUE_UNMARKED_ONLY	(1)
96 #define INVERT_NEG_FLAG		(1)
97 
98 struct pfr_walktree {
99 	enum pfrw_op {
100 		PFRW_MARK,
101 		PFRW_SWEEP,
102 		PFRW_ENQUEUE,
103 		PFRW_GET_ADDRS,
104 		PFRW_GET_ASTATS,
105 		PFRW_POOL_GET,
106 		PFRW_DYNADDR_UPDATE
107 	}	 pfrw_op;
108 	union {
109 		struct pfr_addr		*pfrw1_addr;
110 		struct pfr_astats	*pfrw1_astats;
111 		struct pfr_kentryworkq	*pfrw1_workq;
112 		struct pfr_kentry	*pfrw1_kentry;
113 		struct pfi_dynaddr	*pfrw1_dyn;
114 	}	 pfrw_1;
115 	int	 pfrw_free;
116 	int	 pfrw_flags;
117 };
118 #define pfrw_addr	pfrw_1.pfrw1_addr
119 #define pfrw_astats	pfrw_1.pfrw1_astats
120 #define pfrw_workq	pfrw_1.pfrw1_workq
121 #define pfrw_kentry	pfrw_1.pfrw1_kentry
122 #define pfrw_dyn	pfrw_1.pfrw1_dyn
123 #define pfrw_cnt	pfrw_free
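
/*
 * One walker structure parameterizes the single rn_walktree()
 * callback, pfr_walktree(): pfrw_op selects the operation, the
 * pfrw_1 union carries its argument, and pfrw_free doubles (via the
 * pfrw_cnt alias) as a result counter or a remaining-slot countdown.
 */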
124 
125 #define senderr(e)	do { rv = (e); goto _bad; } while (0)
126 
127 struct pool		 pfr_ktable_pl;
128 struct pool		 pfr_kentry_pl;
129 struct pool		 pfr_kentry_pl2;
130 struct pool		 pfr_kcounters_pl;
131 struct sockaddr_in	 pfr_sin;
132 struct sockaddr_in6	 pfr_sin6;
133 union sockaddr_union	 pfr_mask;
134 struct pf_addr		 pfr_ffaddr;
135 
136 void			 pfr_copyout_addr(struct pfr_addr *,
137 			    struct pfr_kentry *ke);
138 int			 pfr_validate_addr(struct pfr_addr *);
139 void			 pfr_enqueue_addrs(struct pfr_ktable *,
140 			    struct pfr_kentryworkq *, int *, int);
141 void			 pfr_mark_addrs(struct pfr_ktable *);
142 struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
143 			    struct pfr_addr *, int);
144 struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *, int);
145 void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
146 void			 pfr_destroy_kentry(struct pfr_kentry *);
147 void			 pfr_insert_kentries(struct pfr_ktable *,
148 			    struct pfr_kentryworkq *, long);
149 void			 pfr_remove_kentries(struct pfr_ktable *,
150 			    struct pfr_kentryworkq *);
151 void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
152 			    int);
153 void			 pfr_reset_feedback(struct pfr_addr *, int, int);
154 void			 pfr_prepare_network(union sockaddr_union *, int, int);
155 int			 pfr_route_kentry(struct pfr_ktable *,
156 			    struct pfr_kentry *);
157 int			 pfr_unroute_kentry(struct pfr_ktable *,
158 			    struct pfr_kentry *);
159 int			 pfr_walktree(struct radix_node *, void *);
160 int			 pfr_validate_table(struct pfr_table *, int, int);
161 int			 pfr_fix_anchor(char *);
162 void			 pfr_commit_ktable(struct pfr_ktable *, long);
163 void			 pfr_insert_ktables(struct pfr_ktableworkq *);
164 void			 pfr_insert_ktable(struct pfr_ktable *);
165 void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
166 void			 pfr_setflags_ktable(struct pfr_ktable *, int);
167 void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
168 			    int);
169 void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
170 struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
171 void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
172 void			 pfr_destroy_ktable(struct pfr_ktable *, int);
173 int			 pfr_ktable_compare(struct pfr_ktable *,
174 			    struct pfr_ktable *);
175 struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
176 void			 pfr_clean_node_mask(struct pfr_ktable *,
177 			    struct pfr_kentryworkq *);
178 int			 pfr_table_count(struct pfr_table *, int);
179 int			 pfr_skip_table(struct pfr_table *,
180 			    struct pfr_ktable *, int);
181 struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);
182 
183 RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
184 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
185 
186 struct pfr_ktablehead	 pfr_ktables;
187 struct pfr_table	 pfr_nulltable;
188 int			 pfr_ktable_cnt;
189 
190 void
191 pfr_initialize(void)
192 {
193 	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
194 	    "pfrktable", NULL);
195 	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
196 	    "pfrkentry", NULL);
197 	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
198 	    "pfrkentry2", NULL);
199 	pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters), 0, 0, 0,
200 	    "pfrkcounters", NULL);
201 
202 	pfr_sin.sin_len = sizeof(pfr_sin);
203 	pfr_sin.sin_family = AF_INET;
204 	pfr_sin6.sin6_len = sizeof(pfr_sin6);
205 	pfr_sin6.sin6_family = AF_INET6;
206 
207 	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
208 }
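
/*
 * pfr_sin and pfr_sin6 are preinitialized above and reused as scratch
 * lookup keys in pfr_match_addr(), so only the address field needs to
 * be filled in on the per-packet match path.
 */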
209 
210 int
211 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
212 {
213 	struct pfr_ktable	*kt;
214 	struct pfr_kentryworkq	 workq;
215 	int			 s;
216 
217 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
218 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
219 		return (EINVAL);
220 	kt = pfr_lookup_table(tbl);
221 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
222 		return (ESRCH);
223 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
224 		return (EPERM);
225 	pfr_enqueue_addrs(kt, &workq, ndel, 0);
226 
227 	if (!(flags & PFR_FLAG_DUMMY)) {
228 		if (flags & PFR_FLAG_ATOMIC)
229 			s = splsoftnet();
230 		pfr_remove_kentries(kt, &workq);
231 		if (flags & PFR_FLAG_ATOMIC)
232 			splx(s);
233 		if (kt->pfrkt_cnt) {
234 			printf("pfr_clr_addrs: corruption detected (%d).\n",
235 			    kt->pfrkt_cnt);
236 			kt->pfrkt_cnt = 0;
237 		}
238 	}
239 	return (0);
240 }
241 
242 int
243 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
244     int *nadd, int flags)
245 {
246 	struct pfr_ktable	*kt, *tmpkt;
247 	struct pfr_kentryworkq	 workq;
248 	struct pfr_kentry	*p, *q;
249 	struct pfr_addr		 ad;
250 	int			 i, rv, s, xadd = 0;
251 	long			 tzero = time_second;
252 
253 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
254 	    PFR_FLAG_FEEDBACK);
255 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
256 		return (EINVAL);
257 	kt = pfr_lookup_table(tbl);
258 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
259 		return (ESRCH);
260 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
261 		return (EPERM);
262 	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
263 	if (tmpkt == NULL)
264 		return (ENOMEM);
265 	SLIST_INIT(&workq);
266 	for (i = 0; i < size; i++) {
267 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
268 			senderr(EFAULT);
269 		if (pfr_validate_addr(&ad))
270 			senderr(EINVAL);
271 		p = pfr_lookup_addr(kt, &ad, 1);
272 		q = pfr_lookup_addr(tmpkt, &ad, 1);
273 		if (flags & PFR_FLAG_FEEDBACK) {
274 			if (q != NULL)
275 				ad.pfra_fback = PFR_FB_DUPLICATE;
276 			else if (p == NULL)
277 				ad.pfra_fback = PFR_FB_ADDED;
278 			else if (p->pfrke_not != ad.pfra_not)
279 				ad.pfra_fback = PFR_FB_CONFLICT;
280 			else
281 				ad.pfra_fback = PFR_FB_NONE;
282 		}
283 		if (p == NULL && q == NULL) {
284 			p = pfr_create_kentry(&ad,
285 			    !(flags & PFR_FLAG_USERIOCTL));
286 			if (p == NULL)
287 				senderr(ENOMEM);
288 			if (pfr_route_kentry(tmpkt, p)) {
289 				pfr_destroy_kentry(p);
290 				ad.pfra_fback = PFR_FB_NONE;
291 			} else {
292 				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
293 				xadd++;
294 			}
295 		}
296 		if (flags & PFR_FLAG_FEEDBACK)
297 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
298 				senderr(EFAULT);
299 	}
300 	pfr_clean_node_mask(tmpkt, &workq);
301 	if (!(flags & PFR_FLAG_DUMMY)) {
302 		if (flags & PFR_FLAG_ATOMIC)
303 			s = splsoftnet();
304 		pfr_insert_kentries(kt, &workq, tzero);
305 		if (flags & PFR_FLAG_ATOMIC)
306 			splx(s);
307 	} else
308 		pfr_destroy_kentries(&workq);
309 	if (nadd != NULL)
310 		*nadd = xadd;
311 	pfr_destroy_ktable(tmpkt, 0);
312 	return (0);
313 _bad:
314 	pfr_clean_node_mask(tmpkt, &workq);
315 	pfr_destroy_kentries(&workq);
316 	if (flags & PFR_FLAG_FEEDBACK)
317 		pfr_reset_feedback(addr, size, flags);
318 	pfr_destroy_ktable(tmpkt, 0);
319 	return (rv);
320 }
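
/*
 * pfr_add_addrs() is reached from userland through the DIOCRADDADDRS
 * ioctl.  A minimal sketch of a caller (error handling elided,
 * assuming an active table "badhosts" and a descriptor from
 * open("/dev/pf", O_RDWR)):
 *
 *	struct pfioc_table io;
 *	struct pfr_addr a;
 *
 *	bzero(&io, sizeof(io));
 *	bzero(&a, sizeof(a));
 *	strlcpy(io.pfrio_table.pfrt_name, "badhosts",
 *	    sizeof(io.pfrio_table.pfrt_name));
 *	a.pfra_af = AF_INET;
 *	a.pfra_net = 32;		/* single host */
 *	inet_pton(AF_INET, "192.0.2.1", &a.pfra_ip4addr);
 *	io.pfrio_buffer = &a;
 *	io.pfrio_esize = sizeof(a);
 *	io.pfrio_size = 1;
 *	if (ioctl(dev, DIOCRADDADDRS, &io) == 0)
 *		printf("%d added\n", io.pfrio_nadd);
 */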
321 
322 int
323 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
324     int *ndel, int flags)
325 {
326 	struct pfr_ktable	*kt;
327 	struct pfr_kentryworkq	 workq;
328 	struct pfr_kentry	*p;
329 	struct pfr_addr		 ad;
330 	int			 i, rv, s, xdel = 0, log = 1;
331 
332 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
333 	    PFR_FLAG_FEEDBACK);
334 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
335 		return (EINVAL);
336 	kt = pfr_lookup_table(tbl);
337 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
338 		return (ESRCH);
339 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
340 		return (EPERM);
341 	/*
342 	 * there are two algorithms to choose from here.
343 	 * with:
344 	 *   n: number of addresses to delete
345 	 *   N: number of addresses in the table
346 	 *
347 	 * one is O(N) and is better for large 'n'
348 	 * one is O(n*LOG(N)) and is better for small 'n'
349 	 *
350 	 * the following code tries to decide which one is best.
351 	 */
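	/*
	 * e.g. for N = 1024 the loop below ends with log = 12, so the
	 * full-table scan is chosen once more than 1024/12 = 85
	 * addresses are to be deleted.
	 */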
352 	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
353 		log++;
354 	if (size > kt->pfrkt_cnt/log) {
355 		/* full table scan */
356 		pfr_mark_addrs(kt);
357 	} else {
358 		/* iterate over addresses to delete */
359 		for (i = 0; i < size; i++) {
360 			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
361 				return (EFAULT);
362 			if (pfr_validate_addr(&ad))
363 				return (EINVAL);
364 			p = pfr_lookup_addr(kt, &ad, 1);
365 			if (p != NULL)
366 				p->pfrke_mark = 0;
367 		}
368 	}
369 	SLIST_INIT(&workq);
370 	for (i = 0; i < size; i++) {
371 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
372 			senderr(EFAULT);
373 		if (pfr_validate_addr(&ad))
374 			senderr(EINVAL);
375 		p = pfr_lookup_addr(kt, &ad, 1);
376 		if (flags & PFR_FLAG_FEEDBACK) {
377 			if (p == NULL)
378 				ad.pfra_fback = PFR_FB_NONE;
379 			else if (p->pfrke_not != ad.pfra_not)
380 				ad.pfra_fback = PFR_FB_CONFLICT;
381 			else if (p->pfrke_mark)
382 				ad.pfra_fback = PFR_FB_DUPLICATE;
383 			else
384 				ad.pfra_fback = PFR_FB_DELETED;
385 		}
386 		if (p != NULL && p->pfrke_not == ad.pfra_not &&
387 		    !p->pfrke_mark) {
388 			p->pfrke_mark = 1;
389 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
390 			xdel++;
391 		}
392 		if (flags & PFR_FLAG_FEEDBACK)
393 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
394 				senderr(EFAULT);
395 	}
396 	if (!(flags & PFR_FLAG_DUMMY)) {
397 		if (flags & PFR_FLAG_ATOMIC)
398 			s = splsoftnet();
399 		pfr_remove_kentries(kt, &workq);
400 		if (flags & PFR_FLAG_ATOMIC)
401 			splx(s);
402 	}
403 	if (ndel != NULL)
404 		*ndel = xdel;
405 	return (0);
406 _bad:
407 	if (flags & PFR_FLAG_FEEDBACK)
408 		pfr_reset_feedback(addr, size, flags);
409 	return (rv);
410 }
411 
412 int
413 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
414     int *size2, int *nadd, int *ndel, int *nchange, int flags,
415     u_int32_t ignore_pfrt_flags)
416 {
417 	struct pfr_ktable	*kt, *tmpkt;
418 	struct pfr_kentryworkq	 addq, delq, changeq;
419 	struct pfr_kentry	*p, *q;
420 	struct pfr_addr		 ad;
421 	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
422 	long			 tzero = time_second;
423 
424 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
425 	    PFR_FLAG_FEEDBACK);
426 	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
427 	    PFR_FLAG_USERIOCTL))
428 		return (EINVAL);
429 	kt = pfr_lookup_table(tbl);
430 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
431 		return (ESRCH);
432 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
433 		return (EPERM);
434 	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
435 	if (tmpkt == NULL)
436 		return (ENOMEM);
437 	pfr_mark_addrs(kt);
438 	SLIST_INIT(&addq);
439 	SLIST_INIT(&delq);
440 	SLIST_INIT(&changeq);
441 	for (i = 0; i < size; i++) {
442 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
443 			senderr(EFAULT);
444 		if (pfr_validate_addr(&ad))
445 			senderr(EINVAL);
446 		ad.pfra_fback = PFR_FB_NONE;
447 		p = pfr_lookup_addr(kt, &ad, 1);
448 		if (p != NULL) {
449 			if (p->pfrke_mark) {
450 				ad.pfra_fback = PFR_FB_DUPLICATE;
451 				goto _skip;
452 			}
453 			p->pfrke_mark = 1;
454 			if (p->pfrke_not != ad.pfra_not) {
455 				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
456 				ad.pfra_fback = PFR_FB_CHANGED;
457 				xchange++;
458 			}
459 		} else {
460 			q = pfr_lookup_addr(tmpkt, &ad, 1);
461 			if (q != NULL) {
462 				ad.pfra_fback = PFR_FB_DUPLICATE;
463 				goto _skip;
464 			}
465 			p = pfr_create_kentry(&ad,
466 			    !(flags & PFR_FLAG_USERIOCTL));
467 			if (p == NULL)
468 				senderr(ENOMEM);
469 			if (pfr_route_kentry(tmpkt, p)) {
470 				pfr_destroy_kentry(p);
471 				ad.pfra_fback = PFR_FB_NONE;
472 			} else {
473 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
474 				ad.pfra_fback = PFR_FB_ADDED;
475 				xadd++;
476 			}
477 		}
478 _skip:
479 		if (flags & PFR_FLAG_FEEDBACK)
480 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
481 				senderr(EFAULT);
482 	}
483 	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
484 	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
485 		if (*size2 < size+xdel) {
486 			*size2 = size+xdel;
487 			senderr(0);
488 		}
489 		i = 0;
490 		SLIST_FOREACH(p, &delq, pfrke_workq) {
491 			pfr_copyout_addr(&ad, p);
492 			ad.pfra_fback = PFR_FB_DELETED;
493 			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
494 				senderr(EFAULT);
495 			i++;
496 		}
497 	}
498 	pfr_clean_node_mask(tmpkt, &addq);
499 	if (!(flags & PFR_FLAG_DUMMY)) {
500 		if (flags & PFR_FLAG_ATOMIC)
501 			s = splsoftnet();
502 		pfr_insert_kentries(kt, &addq, tzero);
503 		pfr_remove_kentries(kt, &delq);
504 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
505 		if (flags & PFR_FLAG_ATOMIC)
506 			splx(s);
507 	} else
508 		pfr_destroy_kentries(&addq);
509 	if (nadd != NULL)
510 		*nadd = xadd;
511 	if (ndel != NULL)
512 		*ndel = xdel;
513 	if (nchange != NULL)
514 		*nchange = xchange;
515 	if ((flags & PFR_FLAG_FEEDBACK) && size2)
516 		*size2 = size+xdel;
517 	pfr_destroy_ktable(tmpkt, 0);
518 	return (0);
519 _bad:
520 	pfr_clean_node_mask(tmpkt, &addq);
521 	pfr_destroy_kentries(&addq);
522 	if (flags & PFR_FLAG_FEEDBACK)
523 		pfr_reset_feedback(addr, size, flags);
524 	pfr_destroy_ktable(tmpkt, 0);
525 	return (rv);
526 }
527 
528 int
529 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
530 	int *nmatch, int flags)
531 {
532 	struct pfr_ktable	*kt;
533 	struct pfr_kentry	*p;
534 	struct pfr_addr		 ad;
535 	int			 i, xmatch = 0;
536 
537 	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
538 	if (pfr_validate_table(tbl, 0, 0))
539 		return (EINVAL);
540 	kt = pfr_lookup_table(tbl);
541 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
542 		return (ESRCH);
543 
544 	for (i = 0; i < size; i++) {
545 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
546 			return (EFAULT);
547 		if (pfr_validate_addr(&ad))
548 			return (EINVAL);
549 		if (ADDR_NETWORK(&ad))
550 			return (EINVAL);
551 		p = pfr_lookup_addr(kt, &ad, 0);
552 		if (flags & PFR_FLAG_REPLACE)
553 			pfr_copyout_addr(&ad, p);
554 		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
555 		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
556 		if (p != NULL && !p->pfrke_not)
557 			xmatch++;
558 		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
559 			return (EFAULT);
560 	}
561 	if (nmatch != NULL)
562 		*nmatch = xmatch;
563 	return (0);
564 }
565 
566 int
567 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
568 	int flags)
569 {
570 	struct pfr_ktable	*kt;
571 	struct pfr_walktree	 w;
572 	int			 rv;
573 
574 	ACCEPT_FLAGS(flags, 0);
575 	if (pfr_validate_table(tbl, 0, 0))
576 		return (EINVAL);
577 	kt = pfr_lookup_table(tbl);
578 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
579 		return (ESRCH);
580 	if (kt->pfrkt_cnt > *size) {
581 		*size = kt->pfrkt_cnt;
582 		return (0);
583 	}
584 
585 	bzero(&w, sizeof(w));
586 	w.pfrw_op = PFRW_GET_ADDRS;
587 	w.pfrw_addr = addr;
588 	w.pfrw_free = kt->pfrkt_cnt;
589 	w.pfrw_flags = flags;
590 	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
591 	if (!rv)
592 		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
593 	if (rv)
594 		return (rv);
595 
596 	if (w.pfrw_free) {
597 		printf("pfr_get_addrs: corruption detected (%d).\n",
598 		    w.pfrw_free);
599 		return (ENOTTY);
600 	}
601 	*size = kt->pfrkt_cnt;
602 	return (0);
603 }
604 
605 int
606 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
607 	int flags)
608 {
609 	struct pfr_ktable	*kt;
610 	struct pfr_walktree	 w;
611 	struct pfr_kentryworkq	 workq;
612 	int			 rv, s;
613 	long			 tzero = time_second;
614 
615 	/* XXX PFR_FLAG_CLSTATS disabled */
616 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
617 	if (pfr_validate_table(tbl, 0, 0))
618 		return (EINVAL);
619 	kt = pfr_lookup_table(tbl);
620 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
621 		return (ESRCH);
622 	if (kt->pfrkt_cnt > *size) {
623 		*size = kt->pfrkt_cnt;
624 		return (0);
625 	}
626 
627 	bzero(&w, sizeof(w));
628 	w.pfrw_op = PFRW_GET_ASTATS;
629 	w.pfrw_astats = addr;
630 	w.pfrw_free = kt->pfrkt_cnt;
631 	w.pfrw_flags = flags;
632 	if (flags & PFR_FLAG_ATOMIC)
633 		s = splsoftnet();
634 	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
635 	if (!rv)
636 		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
637 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
638 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
639 		pfr_clstats_kentries(&workq, tzero, 0);
640 	}
641 	if (flags & PFR_FLAG_ATOMIC)
642 		splx(s);
643 	if (rv)
644 		return (rv);
645 
646 	if (w.pfrw_free) {
647 		printf("pfr_get_astats: corruption detected (%d).\n",
648 		    w.pfrw_free);
649 		return (ENOTTY);
650 	}
651 	*size = kt->pfrkt_cnt;
652 	return (0);
653 }
654 
655 int
656 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
657     int *nzero, int flags)
658 {
659 	struct pfr_ktable	*kt;
660 	struct pfr_kentryworkq	 workq;
661 	struct pfr_kentry	*p;
662 	struct pfr_addr		 ad;
663 	int			 i, rv, s, xzero = 0;
664 
665 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
666 	    PFR_FLAG_FEEDBACK);
667 	if (pfr_validate_table(tbl, 0, 0))
668 		return (EINVAL);
669 	kt = pfr_lookup_table(tbl);
670 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
671 		return (ESRCH);
672 	SLIST_INIT(&workq);
673 	for (i = 0; i < size; i++) {
674 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
675 			senderr(EFAULT);
676 		if (pfr_validate_addr(&ad))
677 			senderr(EINVAL);
678 		p = pfr_lookup_addr(kt, &ad, 1);
679 		if (flags & PFR_FLAG_FEEDBACK) {
680 			ad.pfra_fback = (p != NULL) ?
681 			    PFR_FB_CLEARED : PFR_FB_NONE;
682 			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
683 				senderr(EFAULT);
684 		}
685 		if (p != NULL) {
686 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
687 			xzero++;
688 		}
689 	}
690 
691 	if (!(flags & PFR_FLAG_DUMMY)) {
692 		if (flags & PFR_FLAG_ATOMIC)
693 			s = splsoftnet();
694 		pfr_clstats_kentries(&workq, 0, 0);
695 		if (flags & PFR_FLAG_ATOMIC)
696 			splx(s);
697 	}
698 	if (nzero != NULL)
699 		*nzero = xzero;
700 	return (0);
701 _bad:
702 	if (flags & PFR_FLAG_FEEDBACK)
703 		pfr_reset_feedback(addr, size, flags);
704 	return (rv);
705 }
706 
707 int
708 pfr_validate_addr(struct pfr_addr *ad)
709 {
710 	int i;
711 
712 	switch (ad->pfra_af) {
713 #ifdef INET
714 	case AF_INET:
715 		if (ad->pfra_net > 32)
716 			return (-1);
717 		break;
718 #endif /* INET */
719 #ifdef INET6
720 	case AF_INET6:
721 		if (ad->pfra_net > 128)
722 			return (-1);
723 		break;
724 #endif /* INET6 */
725 	default:
726 		return (-1);
727 	}
728 	if (ad->pfra_net < 128 &&
729 		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
730 			return (-1);
731 	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
732 		if (((caddr_t)ad)[i])
733 			return (-1);
734 	if (ad->pfra_not && ad->pfra_not != 1)
735 		return (-1);
736 	if (ad->pfra_fback)
737 		return (-1);
738 	return (0);
739 }
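
/*
 * The boundary-byte and tail-byte checks above enforce that every
 * address bit beyond the prefix is zero: with pfra_af = AF_INET and
 * pfra_net = 24, 192.0.2.0 passes but 192.0.2.1 is rejected, since
 * its last octet lies outside the /24 prefix.  Callers map the -1
 * onto EINVAL.
 */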
740 
741 void
742 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
743 	int *naddr, int sweep)
744 {
745 	struct pfr_walktree	w;
746 
747 	SLIST_INIT(workq);
748 	bzero(&w, sizeof(w));
749 	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
750 	w.pfrw_workq = workq;
751 	if (kt->pfrkt_ip4 != NULL)
752 		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
753 			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
754 	if (kt->pfrkt_ip6 != NULL)
755 		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
756 			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
757 	if (naddr != NULL)
758 		*naddr = w.pfrw_cnt;
759 }
760 
761 void
762 pfr_mark_addrs(struct pfr_ktable *kt)
763 {
764 	struct pfr_walktree	w;
765 
766 	bzero(&w, sizeof(w));
767 	w.pfrw_op = PFRW_MARK;
768 	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
769 		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
770 	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
771 		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
772 }
773 
774 
775 struct pfr_kentry *
776 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
777 {
778 	union sockaddr_union	 sa, mask;
779 	struct radix_node_head	*head;
780 	struct pfr_kentry	*ke;
781 	int			 s;
782 
783 	bzero(&sa, sizeof(sa));
784 	if (ad->pfra_af == AF_INET) {
785 		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
786 		head = kt->pfrkt_ip4;
787 	} else if (ad->pfra_af == AF_INET6) {
788 		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
789 		head = kt->pfrkt_ip6;
790 	}
791 	if (ADDR_NETWORK(ad)) {
792 		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
793 		s = splsoftnet(); /* rn_lookup makes use of globals */
794 		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
795 		splx(s);
796 		if (ke && KENTRY_RNF_ROOT(ke))
797 			ke = NULL;
798 	} else {
799 		ke = (struct pfr_kentry *)rn_match(&sa, head);
800 		if (ke && KENTRY_RNF_ROOT(ke))
801 			ke = NULL;
802 		if (exact && ke && KENTRY_NETWORK(ke))
803 			ke = NULL;
804 	}
805 	return (ke);
806 }
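
/*
 * rn_match() does longest-prefix matching, so looking up the host
 * 192.0.2.1 in a table holding only 192.0.2.0/24 returns the network
 * entry; with 'exact' set, the KENTRY_NETWORK() check above discards
 * such covering entries and only a true host entry matches.
 */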
807 
808 struct pfr_kentry *
809 pfr_create_kentry(struct pfr_addr *ad, int intr)
810 {
811 	struct pfr_kentry	*ke;
812 
813 	if (intr)
814 		ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT | PR_ZERO);
815 	else
816 		ke = pool_get(&pfr_kentry_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
817 	if (ke == NULL)
818 		return (NULL);
819 
820 	if (ad->pfra_af == AF_INET)
821 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
822 	else if (ad->pfra_af == AF_INET6)
823 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
824 	ke->pfrke_af = ad->pfra_af;
825 	ke->pfrke_net = ad->pfra_net;
826 	ke->pfrke_not = ad->pfra_not;
827 	ke->pfrke_intrpool = intr;
828 	return (ke);
829 }
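
/*
 * Entries created with 'intr' set are allocated PR_NOWAIT from the
 * separate pfr_kentry_pl2 pool, since such callers must not sleep;
 * pfrke_intrpool remembers the origin so that pfr_destroy_kentry()
 * returns the entry to the matching pool.
 */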
830 
831 void
832 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
833 {
834 	struct pfr_kentry	*p, *q;
835 
836 	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
837 		q = SLIST_NEXT(p, pfrke_workq);
838 		pfr_destroy_kentry(p);
839 	}
840 }
841 
842 void
843 pfr_destroy_kentry(struct pfr_kentry *ke)
844 {
845 	if (ke->pfrke_counters)
846 		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
847 	if (ke->pfrke_intrpool)
848 		pool_put(&pfr_kentry_pl2, ke);
849 	else
850 		pool_put(&pfr_kentry_pl, ke);
851 }
852 
853 void
854 pfr_insert_kentries(struct pfr_ktable *kt,
855     struct pfr_kentryworkq *workq, long tzero)
856 {
857 	struct pfr_kentry	*p;
858 	int			 rv, n = 0;
859 
860 	SLIST_FOREACH(p, workq, pfrke_workq) {
861 		rv = pfr_route_kentry(kt, p);
862 		if (rv) {
863 			printf("pfr_insert_kentries: cannot route entry "
864 			    "(code=%d).\n", rv);
865 			break;
866 		}
867 		p->pfrke_tzero = tzero;
868 		n++;
869 	}
870 	kt->pfrkt_cnt += n;
871 }
872 
873 int
874 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
875 {
876 	struct pfr_kentry	*p;
877 	int			 rv;
878 
879 	p = pfr_lookup_addr(kt, ad, 1);
880 	if (p != NULL)
881 		return (0);
882 	p = pfr_create_kentry(ad, 1);
883 	if (p == NULL)
884 		return (EINVAL);
885 
886 	rv = pfr_route_kentry(kt, p);
887 	if (rv)
888 		return (rv);
889 
890 	p->pfrke_tzero = tzero;
891 	kt->pfrkt_cnt++;
892 
893 	return (0);
894 }
895 
896 void
897 pfr_remove_kentries(struct pfr_ktable *kt,
898     struct pfr_kentryworkq *workq)
899 {
900 	struct pfr_kentry	*p;
901 	int			 n = 0;
902 
903 	SLIST_FOREACH(p, workq, pfrke_workq) {
904 		pfr_unroute_kentry(kt, p);
905 		n++;
906 	}
907 	kt->pfrkt_cnt -= n;
908 	pfr_destroy_kentries(workq);
909 }
910 
911 void
912 pfr_clean_node_mask(struct pfr_ktable *kt,
913     struct pfr_kentryworkq *workq)
914 {
915 	struct pfr_kentry	*p;
916 
917 	SLIST_FOREACH(p, workq, pfrke_workq)
918 		pfr_unroute_kentry(kt, p);
919 }
920 
921 void
922 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
923 {
924 	struct pfr_kentry	*p;
925 	int			 s;
926 
927 	SLIST_FOREACH(p, workq, pfrke_workq) {
928 		s = splsoftnet();
929 		if (negchange)
930 			p->pfrke_not = !p->pfrke_not;
931 		if (p->pfrke_counters) {
932 			pool_put(&pfr_kcounters_pl, p->pfrke_counters);
933 			p->pfrke_counters = NULL;
934 		}
935 		splx(s);
936 		p->pfrke_tzero = tzero;
937 	}
938 }
939 
940 void
941 pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
942 {
943 	struct pfr_addr	ad;
944 	int		i;
945 
946 	for (i = 0; i < size; i++) {
947 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
948 			break;
949 		ad.pfra_fback = PFR_FB_NONE;
950 		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
951 			break;
952 	}
953 }
954 
955 void
956 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
957 {
958 	int	i;
959 
960 	bzero(sa, sizeof(*sa));
961 	if (af == AF_INET) {
962 		sa->sin.sin_len = sizeof(sa->sin);
963 		sa->sin.sin_family = AF_INET;
964 		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
965 	} else if (af == AF_INET6) {
966 		sa->sin6.sin6_len = sizeof(sa->sin6);
967 		sa->sin6.sin6_family = AF_INET6;
968 		for (i = 0; i < 4; i++) {
969 			if (net <= 32) {
970 				sa->sin6.sin6_addr.s6_addr32[i] =
971 				    net ? htonl(-1 << (32-net)) : 0;
972 				break;
973 			}
974 			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
975 			net -= 32;
976 		}
977 	}
978 }
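
/*
 * Examples of the masks built above: (AF_INET, 24) yields
 * htonl(-1 << 8), i.e. 255.255.255.0, and (AF_INET6, 80) yields
 * ffff:ffff:ffff:ffff:ffff:: via two full 32-bit words, then one
 * partial word before the loop breaks.
 */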
979 
980 int
981 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
982 {
983 	union sockaddr_union	 mask;
984 	struct radix_node	*rn;
985 	struct radix_node_head	*head;
986 	int			 s;
987 
988 	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
989 	if (ke->pfrke_af == AF_INET)
990 		head = kt->pfrkt_ip4;
991 	else if (ke->pfrke_af == AF_INET6)
992 		head = kt->pfrkt_ip6;
993 
994 	s = splsoftnet();
995 	if (KENTRY_NETWORK(ke)) {
996 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
997 		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
998 	} else
999 		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);
1000 	splx(s);
1001 
1002 	return (rn == NULL ? -1 : 0);
1003 }
1004 
1005 int
1006 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1007 {
1008 	union sockaddr_union	 mask;
1009 	struct radix_node	*rn;
1010 	struct radix_node_head	*head;
1011 	int			 s;
1012 
1013 	if (ke->pfrke_af == AF_INET)
1014 		head = kt->pfrkt_ip4;
1015 	else if (ke->pfrke_af == AF_INET6)
1016 		head = kt->pfrkt_ip6;
1017 
1018 	s = splsoftnet();
1019 	if (KENTRY_NETWORK(ke)) {
1020 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1021 		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
1022 	} else
1023 		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
1024 	splx(s);
1025 
1026 	if (rn == NULL) {
1027 		printf("pfr_unroute_kentry: delete failed.\n");
1028 		return (-1);
1029 	}
1030 	return (0);
1031 }
1032 
1033 void
1034 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1035 {
1036 	bzero(ad, sizeof(*ad));
1037 	if (ke == NULL)
1038 		return;
1039 	ad->pfra_af = ke->pfrke_af;
1040 	ad->pfra_net = ke->pfrke_net;
1041 	ad->pfra_not = ke->pfrke_not;
1042 	if (ad->pfra_af == AF_INET)
1043 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1044 	else if (ad->pfra_af == AF_INET6)
1045 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1046 }
1047 
1048 int
1049 pfr_walktree(struct radix_node *rn, void *arg)
1050 {
1051 	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
1052 	struct pfr_walktree	*w = arg;
1053 	int			 s, flags = w->pfrw_flags;
1054 
1055 	switch (w->pfrw_op) {
1056 	case PFRW_MARK:
1057 		ke->pfrke_mark = 0;
1058 		break;
1059 	case PFRW_SWEEP:
1060 		if (ke->pfrke_mark)
1061 			break;
1062 		/* FALLTHROUGH */
1063 	case PFRW_ENQUEUE:
1064 		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1065 		w->pfrw_cnt++;
1066 		break;
1067 	case PFRW_GET_ADDRS:
1068 		if (w->pfrw_free-- > 0) {
1069 			struct pfr_addr ad;
1070 
1071 			pfr_copyout_addr(&ad, ke);
1072 			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
1073 				return (EFAULT);
1074 			w->pfrw_addr++;
1075 		}
1076 		break;
1077 	case PFRW_GET_ASTATS:
1078 		if (w->pfrw_free-- > 0) {
1079 			struct pfr_astats as;
1080 
1081 			pfr_copyout_addr(&as.pfras_a, ke);
1082 
1083 			s = splsoftnet();
1084 			if (ke->pfrke_counters) {
1085 				bcopy(ke->pfrke_counters->pfrkc_packets,
1086 				    as.pfras_packets, sizeof(as.pfras_packets));
1087 				bcopy(ke->pfrke_counters->pfrkc_bytes,
1088 				    as.pfras_bytes, sizeof(as.pfras_bytes));
1089 			} else {
1090 				bzero(as.pfras_packets, sizeof(as.pfras_packets));
1091 				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1092 				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1093 			}
1094 			splx(s);
1095 			as.pfras_tzero = ke->pfrke_tzero;
1096 
1097 			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
1098 				return (EFAULT);
1099 			w->pfrw_astats++;
1100 		}
1101 		break;
1102 	case PFRW_POOL_GET:
1103 		if (ke->pfrke_not)
1104 			break; /* negative entries are ignored */
1105 		if (!w->pfrw_cnt--) {
1106 			w->pfrw_kentry = ke;
1107 			return (1); /* finish search */
1108 		}
1109 		break;
1110 	case PFRW_DYNADDR_UPDATE:
1111 		if (ke->pfrke_af == AF_INET) {
1112 			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1113 				break;
1114 			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1115 			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
1116 			    &ke->pfrke_sa, AF_INET);
1117 			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
1118 			    &pfr_mask, AF_INET);
1119 	} else if (ke->pfrke_af == AF_INET6) {
1120 			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1121 				break;
1122 			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1123 			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
1124 			    &ke->pfrke_sa, AF_INET6);
1125 			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
1126 			    &pfr_mask, AF_INET6);
1127 		}
1128 		break;
1129 	}
1130 	return (0);
1131 }
1132 
1133 int
1134 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1135 {
1136 	struct pfr_ktableworkq	 workq;
1137 	struct pfr_ktable	*p;
1138 	int			 s, xdel = 0;
1139 
1140 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1141 	    PFR_FLAG_ALLRSETS);
1142 	if (pfr_fix_anchor(filter->pfrt_anchor))
1143 		return (EINVAL);
1144 	if (pfr_table_count(filter, flags) < 0)
1145 		return (ENOENT);
1146 
1147 	SLIST_INIT(&workq);
1148 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1149 		if (pfr_skip_table(filter, p, flags))
1150 			continue;
1151 		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1152 			continue;
1153 		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1154 			continue;
1155 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1156 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1157 		xdel++;
1158 	}
1159 	if (!(flags & PFR_FLAG_DUMMY)) {
1160 		if (flags & PFR_FLAG_ATOMIC)
1161 			s = splsoftnet();
1162 		pfr_setflags_ktables(&workq);
1163 		if (flags & PFR_FLAG_ATOMIC)
1164 			splx(s);
1165 	}
1166 	if (ndel != NULL)
1167 		*ndel = xdel;
1168 	return (0);
1169 }
1170 
1171 int
1172 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1173 {
1174 	struct pfr_ktableworkq	 addq, changeq;
1175 	struct pfr_ktable	*p, *q, *r, key;
1176 	int			 i, rv, s, xadd = 0;
1177 	long			 tzero = time_second;
1178 
1179 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1180 	SLIST_INIT(&addq);
1181 	SLIST_INIT(&changeq);
1182 	for (i = 0; i < size; i++) {
1183 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1184 			senderr(EFAULT);
1185 		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1186 		    flags & PFR_FLAG_USERIOCTL))
1187 			senderr(EINVAL);
1188 		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1189 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1190 		if (p == NULL) {
1191 			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1192 			if (p == NULL)
1193 				senderr(ENOMEM);
1194 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1195 				if (!pfr_ktable_compare(p, q))
1196 					goto _skip;
1197 			}
1198 			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1199 			xadd++;
1200 			if (!key.pfrkt_anchor[0])
1201 				goto _skip;
1202 
1203 			/* find or create root table */
1204 			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1205 			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1206 			if (r != NULL) {
1207 				p->pfrkt_root = r;
1208 				goto _skip;
1209 			}
1210 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1211 				if (!pfr_ktable_compare(&key, q)) {
1212 					p->pfrkt_root = q;
1213 					goto _skip;
1214 				}
1215 			}
1216 			key.pfrkt_flags = 0;
1217 			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1218 			if (r == NULL)
1219 				senderr(ENOMEM);
1220 			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1221 			p->pfrkt_root = r;
1222 		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1223 			SLIST_FOREACH(q, &changeq, pfrkt_workq)
1224 				if (!pfr_ktable_compare(&key, q))
1225 					goto _skip;
1226 			p->pfrkt_nflags = (p->pfrkt_flags &
1227 			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1228 			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1229 			xadd++;
1230 		}
1231 _skip:
1232 	;
1233 	}
1234 	if (!(flags & PFR_FLAG_DUMMY)) {
1235 		if (flags & PFR_FLAG_ATOMIC)
1236 			s = splsoftnet();
1237 		pfr_insert_ktables(&addq);
1238 		pfr_setflags_ktables(&changeq);
1239 		if (flags & PFR_FLAG_ATOMIC)
1240 			splx(s);
1241 	} else
1242 		pfr_destroy_ktables(&addq, 0);
1243 	if (nadd != NULL)
1244 		*nadd = xadd;
1245 	return (0);
1246 _bad:
1247 	pfr_destroy_ktables(&addq, 0);
1248 	return (rv);
1249 }
1250 
1251 int
1252 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1253 {
1254 	struct pfr_ktableworkq	 workq;
1255 	struct pfr_ktable	*p, *q, key;
1256 	int			 i, s, xdel = 0;
1257 
1258 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1259 	SLIST_INIT(&workq);
1260 	for (i = 0; i < size; i++) {
1261 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1262 			return (EFAULT);
1263 		if (pfr_validate_table(&key.pfrkt_t, 0,
1264 		    flags & PFR_FLAG_USERIOCTL))
1265 			return (EINVAL);
1266 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1267 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1268 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1269 				if (!pfr_ktable_compare(p, q))
1270 					goto _skip;
1271 			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1272 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1273 			xdel++;
1274 		}
1275 _skip:
1276 	;
1277 	}
1278 
1279 	if (!(flags & PFR_FLAG_DUMMY)) {
1280 		if (flags & PFR_FLAG_ATOMIC)
1281 			s = splsoftnet();
1282 		pfr_setflags_ktables(&workq);
1283 		if (flags & PFR_FLAG_ATOMIC)
1284 			splx(s);
1285 	}
1286 	if (ndel != NULL)
1287 		*ndel = xdel;
1288 	return (0);
1289 }
1290 
1291 int
1292 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1293 	int flags)
1294 {
1295 	struct pfr_ktable	*p;
1296 	int			 n, nn;
1297 
1298 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1299 	if (pfr_fix_anchor(filter->pfrt_anchor))
1300 		return (EINVAL);
1301 	n = nn = pfr_table_count(filter, flags);
1302 	if (n < 0)
1303 		return (ENOENT);
1304 	if (n > *size) {
1305 		*size = n;
1306 		return (0);
1307 	}
1308 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1309 		if (pfr_skip_table(filter, p, flags))
1310 			continue;
1311 		if (n-- <= 0)
1312 			continue;
1313 		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
1314 			return (EFAULT);
1315 	}
1316 	if (n) {
1317 		printf("pfr_get_tables: corruption detected (%d).\n", n);
1318 		return (ENOTTY);
1319 	}
1320 	*size = nn;
1321 	return (0);
1322 }
1323 
1324 int
1325 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1326 	int flags)
1327 {
1328 	struct pfr_ktable	*p;
1329 	struct pfr_ktableworkq	 workq;
1330 	int			 s, n, nn;
1331 	long			 tzero = time_second;
1332 
1333 	/* XXX PFR_FLAG_CLSTATS disabled */
1334 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
1335 	if (pfr_fix_anchor(filter->pfrt_anchor))
1336 		return (EINVAL);
1337 	n = nn = pfr_table_count(filter, flags);
1338 	if (n < 0)
1339 		return (ENOENT);
1340 	if (n > *size) {
1341 		*size = n;
1342 		return (0);
1343 	}
1344 	SLIST_INIT(&workq);
1345 	if (flags & PFR_FLAG_ATOMIC)
1346 		s = splsoftnet();
1347 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1348 		if (pfr_skip_table(filter, p, flags))
1349 			continue;
1350 		if (n-- <= 0)
1351 			continue;
1352 		if (!(flags & PFR_FLAG_ATOMIC))
1353 			s = splsoftnet();
1354 		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
1355 			splx(s);
1356 			return (EFAULT);
1357 		}
1358 		if (!(flags & PFR_FLAG_ATOMIC))
1359 			splx(s);
1360 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1361 	}
1362 	if (flags & PFR_FLAG_CLSTATS)
1363 		pfr_clstats_ktables(&workq, tzero,
1364 		    flags & PFR_FLAG_ADDRSTOO);
1365 	if (flags & PFR_FLAG_ATOMIC)
1366 		splx(s);
1367 	if (n) {
1368 		printf("pfr_get_tstats: corruption detected (%d).\n", n);
1369 		return (ENOTTY);
1370 	}
1371 	*size = nn;
1372 	return (0);
1373 }
1374 
1375 int
1376 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1377 {
1378 	struct pfr_ktableworkq	 workq;
1379 	struct pfr_ktable	*p, key;
1380 	int			 i, s, xzero = 0;
1381 	long			 tzero = time_second;
1382 
1383 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
1384 	    PFR_FLAG_ADDRSTOO);
1385 	SLIST_INIT(&workq);
1386 	for (i = 0; i < size; i++) {
1387 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1388 			return (EFAULT);
1389 		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1390 			return (EINVAL);
1391 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1392 		if (p != NULL) {
1393 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1394 			xzero++;
1395 		}
1396 	}
1397 	if (!(flags & PFR_FLAG_DUMMY)) {
1398 		if (flags & PFR_FLAG_ATOMIC)
1399 			s = splsoftnet();
1400 		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1401 		if (flags & PFR_FLAG_ATOMIC)
1402 			splx(s);
1403 	}
1404 	if (nzero != NULL)
1405 		*nzero = xzero;
1406 	return (0);
1407 }
1408 
1409 int
1410 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1411 	int *nchange, int *ndel, int flags)
1412 {
1413 	struct pfr_ktableworkq	 workq;
1414 	struct pfr_ktable	*p, *q, key;
1415 	int			 i, s, xchange = 0, xdel = 0;
1416 
1417 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1418 	if ((setflag & ~PFR_TFLAG_USRMASK) ||
1419 	    (clrflag & ~PFR_TFLAG_USRMASK) ||
1420 	    (setflag & clrflag))
1421 		return (EINVAL);
1422 	SLIST_INIT(&workq);
1423 	for (i = 0; i < size; i++) {
1424 		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
1425 			return (EFAULT);
1426 		if (pfr_validate_table(&key.pfrkt_t, 0,
1427 		    flags & PFR_FLAG_USERIOCTL))
1428 			return (EINVAL);
1429 		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1430 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1431 			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1432 			    ~clrflag;
1433 			if (p->pfrkt_nflags == p->pfrkt_flags)
1434 				goto _skip;
1435 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1436 				if (!pfr_ktable_compare(p, q))
1437 					goto _skip;
1438 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1439 			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1440 			    (clrflag & PFR_TFLAG_PERSIST) &&
1441 			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1442 				xdel++;
1443 			else
1444 				xchange++;
1445 		}
1446 _skip:
1447 	;
1448 	}
1449 	if (!(flags & PFR_FLAG_DUMMY)) {
1450 		if (flags & PFR_FLAG_ATOMIC)
1451 			s = splsoftnet();
1452 		pfr_setflags_ktables(&workq);
1453 		if (flags & PFR_FLAG_ATOMIC)
1454 			splx(s);
1455 	}
1456 	if (nchange != NULL)
1457 		*nchange = xchange;
1458 	if (ndel != NULL)
1459 		*ndel = xdel;
1460 	return (0);
1461 }
1462 
1463 int
1464 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1465 {
1466 	struct pfr_ktableworkq	 workq;
1467 	struct pfr_ktable	*p;
1468 	struct pf_ruleset	*rs;
1469 	int			 xdel = 0;
1470 
1471 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1472 	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1473 	if (rs == NULL)
1474 		return (ENOMEM);
1475 	SLIST_INIT(&workq);
1476 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1477 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1478 		    pfr_skip_table(trs, p, 0))
1479 			continue;
1480 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1481 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1482 		xdel++;
1483 	}
1484 	if (!(flags & PFR_FLAG_DUMMY)) {
1485 		pfr_setflags_ktables(&workq);
1486 		if (ticket != NULL)
1487 			*ticket = ++rs->tticket;
1488 		rs->topen = 1;
1489 	} else
1490 		pf_remove_if_empty_ruleset(rs);
1491 	if (ndel != NULL)
1492 		*ndel = xdel;
1493 	return (0);
1494 }
1495 
1496 int
1497 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1498     int *nadd, int *naddr, u_int32_t ticket, int flags)
1499 {
1500 	struct pfr_ktableworkq	 tableq;
1501 	struct pfr_kentryworkq	 addrq;
1502 	struct pfr_ktable	*kt, *rt, *shadow, key;
1503 	struct pfr_kentry	*p;
1504 	struct pfr_addr		 ad;
1505 	struct pf_ruleset	*rs;
1506 	int			 i, rv, xadd = 0, xaddr = 0;
1507 
1508 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1509 	if (size && !(flags & PFR_FLAG_ADDRSTOO))
1510 		return (EINVAL);
1511 	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1512 	    flags & PFR_FLAG_USERIOCTL))
1513 		return (EINVAL);
1514 	rs = pf_find_ruleset(tbl->pfrt_anchor);
1515 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1516 		return (EBUSY);
1517 	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1518 	SLIST_INIT(&tableq);
1519 	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
1520 	if (kt == NULL) {
1521 		kt = pfr_create_ktable(tbl, 0, 1);
1522 		if (kt == NULL)
1523 			return (ENOMEM);
1524 		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1525 		xadd++;
1526 		if (!tbl->pfrt_anchor[0])
1527 			goto _skip;
1528 
1529 		/* find or create root table */
1530 		bzero(&key, sizeof(key));
1531 		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1532 		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
1533 		if (rt != NULL) {
1534 			kt->pfrkt_root = rt;
1535 			goto _skip;
1536 		}
1537 		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1538 		if (rt == NULL) {
1539 			pfr_destroy_ktables(&tableq, 0);
1540 			return (ENOMEM);
1541 		}
1542 		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1543 		kt->pfrkt_root = rt;
1544 	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1545 		xadd++;
1546 _skip:
1547 	shadow = pfr_create_ktable(tbl, 0, 0);
1548 	if (shadow == NULL) {
1549 		pfr_destroy_ktables(&tableq, 0);
1550 		return (ENOMEM);
1551 	}
1552 	SLIST_INIT(&addrq);
1553 	for (i = 0; i < size; i++) {
1554 		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
1555 			senderr(EFAULT);
1556 		if (pfr_validate_addr(&ad))
1557 			senderr(EINVAL);
1558 		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
1559 			continue;
1560 		p = pfr_create_kentry(&ad, 0);
1561 		if (p == NULL)
1562 			senderr(ENOMEM);
1563 		if (pfr_route_kentry(shadow, p)) {
1564 			pfr_destroy_kentry(p);
1565 			continue;
1566 		}
1567 		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1568 		xaddr++;
1569 	}
1570 	if (!(flags & PFR_FLAG_DUMMY)) {
1571 		if (kt->pfrkt_shadow != NULL)
1572 			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1573 		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1574 		pfr_insert_ktables(&tableq);
1575 		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1576 		    xaddr : NO_ADDRESSES;
1577 		kt->pfrkt_shadow = shadow;
1578 	} else {
1579 		pfr_clean_node_mask(shadow, &addrq);
1580 		pfr_destroy_ktable(shadow, 0);
1581 		pfr_destroy_ktables(&tableq, 0);
1582 		pfr_destroy_kentries(&addrq);
1583 	}
1584 	if (nadd != NULL)
1585 		*nadd = xadd;
1586 	if (naddr != NULL)
1587 		*naddr = xaddr;
1588 	return (0);
1589 _bad:
1590 	pfr_destroy_ktable(shadow, 0);
1591 	pfr_destroy_ktables(&tableq, 0);
1592 	pfr_destroy_kentries(&addrq);
1593 	return (rv);
1594 }
1595 
1596 int
1597 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1598 {
1599 	struct pfr_ktableworkq	 workq;
1600 	struct pfr_ktable	*p;
1601 	struct pf_ruleset	*rs;
1602 	int			 xdel = 0;
1603 
1604 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1605 	rs = pf_find_ruleset(trs->pfrt_anchor);
1606 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1607 		return (0);
1608 	SLIST_INIT(&workq);
1609 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1610 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1611 		    pfr_skip_table(trs, p, 0))
1612 			continue;
1613 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1614 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1615 		xdel++;
1616 	}
1617 	if (!(flags & PFR_FLAG_DUMMY)) {
1618 		pfr_setflags_ktables(&workq);
1619 		rs->topen = 0;
1620 		pf_remove_if_empty_ruleset(rs);
1621 	}
1622 	if (ndel != NULL)
1623 		*ndel = xdel;
1624 	return (0);
1625 }
1626 
1627 int
1628 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1629     int *nchange, int flags)
1630 {
1631 	struct pfr_ktable	*p, *q;
1632 	struct pfr_ktableworkq	 workq;
1633 	struct pf_ruleset	*rs;
1634 	int			 s, xadd = 0, xchange = 0;
1635 	long			 tzero = time_second;
1636 
1637 	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
1638 	rs = pf_find_ruleset(trs->pfrt_anchor);
1639 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1640 		return (EBUSY);
1641 
1642 	SLIST_INIT(&workq);
1643 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1644 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1645 		    pfr_skip_table(trs, p, 0))
1646 			continue;
1647 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1648 		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1649 			xchange++;
1650 		else
1651 			xadd++;
1652 	}
1653 
1654 	if (!(flags & PFR_FLAG_DUMMY)) {
1655 		if (flags & PFR_FLAG_ATOMIC)
1656 			s = splsoftnet();
1657 		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1658 			q = SLIST_NEXT(p, pfrkt_workq);
1659 			pfr_commit_ktable(p, tzero);
1660 		}
1661 		if (flags & PFR_FLAG_ATOMIC)
1662 			splx(s);
1663 		rs->topen = 0;
1664 		pf_remove_if_empty_ruleset(rs);
1665 	}
1666 	if (nadd != NULL)
1667 		*nadd = xadd;
1668 	if (nchange != NULL)
1669 		*nchange = xchange;
1670 
1671 	return (0);
1672 }
1673 
1674 void
1675 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1676 {
1677 	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1678 	int			 nflags;
1679 
1680 	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1681 		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1682 			pfr_clstats_ktable(kt, tzero, 1);
1683 	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1684 		/* kt might contain addresses */
1685 		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1686 		struct pfr_kentry	*p, *q, *next;
1687 		struct pfr_addr		 ad;
1688 
1689 		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1690 		pfr_mark_addrs(kt);
1691 		SLIST_INIT(&addq);
1692 		SLIST_INIT(&changeq);
1693 		SLIST_INIT(&delq);
1694 		SLIST_INIT(&garbageq);
1695 		pfr_clean_node_mask(shadow, &addrq);
1696 		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1697 			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
1698 			pfr_copyout_addr(&ad, p);
1699 			q = pfr_lookup_addr(kt, &ad, 1);
1700 			if (q != NULL) {
1701 				if (q->pfrke_not != p->pfrke_not)
1702 					SLIST_INSERT_HEAD(&changeq, q,
1703 					    pfrke_workq);
1704 				q->pfrke_mark = 1;
1705 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1706 			} else {
1707 				p->pfrke_tzero = tzero;
1708 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1709 			}
1710 		}
1711 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1712 		pfr_insert_kentries(kt, &addq, tzero);
1713 		pfr_remove_kentries(kt, &delq);
1714 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1715 		pfr_destroy_kentries(&garbageq);
1716 	} else {
1717 		/* kt cannot contain addresses */
1718 		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1719 		    shadow->pfrkt_ip4);
1720 		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1721 		    shadow->pfrkt_ip6);
1722 		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1723 		pfr_clstats_ktable(kt, tzero, 1);
1724 	}
1725 	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1726 	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1727 		& ~PFR_TFLAG_INACTIVE;
1728 	pfr_destroy_ktable(shadow, 0);
1729 	kt->pfrkt_shadow = NULL;
1730 	pfr_setflags_ktable(kt, nflags);
1731 }
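
/*
 * pfr_commit_ktable() handles three cases: a shadow defined without an
 * address list (NO_ADDRESSES) leaves the addresses alone and only
 * resets statistics on newly activated tables; committing onto an
 * active table diffs the shadow against the marked entries so that
 * unchanged addresses keep their counters; and an inactive table,
 * which holds no addresses, simply swaps its radix heads with the
 * shadow's.
 */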
1732 
1733 int
1734 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1735 {
1736 	int i;
1737 
1738 	if (!tbl->pfrt_name[0])
1739 		return (-1);
1740 	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1741 		return (-1);
1742 	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1743 		return (-1);
1744 	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1745 		if (tbl->pfrt_name[i])
1746 			return (-1);
1747 	if (pfr_fix_anchor(tbl->pfrt_anchor))
1748 		return (-1);
1749 	if (tbl->pfrt_flags & ~allowedflags)
1750 		return (-1);
1751 	return (0);
1752 }
1753 
1754 /*
1755  * Rewrite anchors referenced by tables to strip leading slashes
1756  * and check for validity.
1757  */
1758 int
1759 pfr_fix_anchor(char *anchor)
1760 {
1761 	size_t siz = MAXPATHLEN;
1762 	int i;
1763 
1764 	if (anchor[0] == '/') {
1765 		char *path;
1766 		int off;
1767 
1768 		path = anchor;
1769 		off = 1;
1770 		while (*++path == '/')
1771 			off++;
1772 		bcopy(path, anchor, siz - off);
1773 		memset(anchor + siz - off, 0, off);
1774 	}
1775 	if (anchor[siz - 1])
1776 		return (-1);
1777 	for (i = strlen(anchor); i < siz; i++)
1778 		if (anchor[i])
1779 			return (-1);
1780 	return (0);
1781 }
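
/*
 * e.g. "/foo/bar" is rewritten in place to "foo/bar", NUL-padding the
 * vacated tail; anchors that are not NUL-terminated and NUL-padded
 * within MAXPATHLEN are rejected.
 */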
1782 
1783 int
1784 pfr_table_count(struct pfr_table *filter, int flags)
1785 {
1786 	struct pf_ruleset *rs;
1787 
1788 	if (flags & PFR_FLAG_ALLRSETS)
1789 		return (pfr_ktable_cnt);
1790 	if (filter->pfrt_anchor[0]) {
1791 		rs = pf_find_ruleset(filter->pfrt_anchor);
1792 		return ((rs != NULL) ? rs->tables : -1);
1793 	}
1794 	return (pf_main_ruleset.tables);
1795 }
1796 
1797 int
1798 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1799 {
1800 	if (flags & PFR_FLAG_ALLRSETS)
1801 		return (0);
1802 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1803 		return (1);
1804 	return (0);
1805 }
1806 
1807 void
1808 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1809 {
1810 	struct pfr_ktable	*p;
1811 
1812 	SLIST_FOREACH(p, workq, pfrkt_workq)
1813 		pfr_insert_ktable(p);
1814 }
1815 
1816 void
1817 pfr_insert_ktable(struct pfr_ktable *kt)
1818 {
1819 	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1820 	pfr_ktable_cnt++;
1821 	if (kt->pfrkt_root != NULL)
1822 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1823 			pfr_setflags_ktable(kt->pfrkt_root,
1824 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1825 }
1826 
1827 void
1828 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1829 {
1830 	struct pfr_ktable	*p, *q;
1831 
1832 	for (p = SLIST_FIRST(workq); p; p = q) {
1833 		q = SLIST_NEXT(p, pfrkt_workq);
1834 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1835 	}
1836 }
1837 
1838 void
1839 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1840 {
1841 	struct pfr_kentryworkq	addrq;
1842 
1843 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1844 	    !(newf & PFR_TFLAG_PERSIST))
1845 		newf &= ~PFR_TFLAG_ACTIVE;
1846 	if (!(newf & PFR_TFLAG_ACTIVE))
1847 		newf &= ~PFR_TFLAG_USRMASK;
1848 	if (!(newf & PFR_TFLAG_SETMASK)) {
1849 		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1850 		if (kt->pfrkt_root != NULL)
1851 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1852 				pfr_setflags_ktable(kt->pfrkt_root,
1853 				    kt->pfrkt_root->pfrkt_flags &
1854 					~PFR_TFLAG_REFDANCHOR);
1855 		pfr_destroy_ktable(kt, 1);
1856 		pfr_ktable_cnt--;
1857 		return;
1858 	}
1859 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1860 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1861 		pfr_remove_kentries(kt, &addrq);
1862 	}
1863 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1864 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1865 		kt->pfrkt_shadow = NULL;
1866 	}
1867 	kt->pfrkt_flags = newf;
1868 }
1869 
1870 void
1871 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1872 {
1873 	struct pfr_ktable	*p;
1874 
1875 	SLIST_FOREACH(p, workq, pfrkt_workq)
1876 		pfr_clstats_ktable(p, tzero, recurse);
1877 }
1878 
1879 void
1880 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1881 {
1882 	struct pfr_kentryworkq	 addrq;
1883 	int			 s;
1884 
1885 	if (recurse) {
1886 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1887 		pfr_clstats_kentries(&addrq, tzero, 0);
1888 	}
1889 	s = splsoftnet();
1890 	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1891 	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1892 	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1893 	splx(s);
1894 	kt->pfrkt_tzero = tzero;
1895 }
1896 
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_WAITOK | PR_ZERO | PR_LIMITFAIL);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

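/*
 * Tear a table down: optionally flush its address entries, free both
 * radix heads, recursively destroy an attached shadow table and drop
 * the ruleset reference before the structure goes back to the pool.
 */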
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

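/* Ordering for the pfr_ktables tree: by table name, then anchor path. */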
int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

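/*
 * Single-address lookup used by the packet path.  An inactive table
 * defers to its root counterpart; a hit on an entry with pfrke_not set
 * counts as a non-match.  The table's match/nomatch statistics are
 * updated either way.
 */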
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

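/*
 * Per-packet statistics update.  The packet is accounted against the
 * table and, when PFR_TFLAG_COUNTERS is set, against the matching
 * entry, whose counters are allocated lazily.  If the lookup result
 * contradicts what the rule expected (notrule), the packet is counted
 * under PFR_OP_XPASS instead.
 */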
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
			    PR_NOWAIT | PR_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}

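/*
 * Resolve the table referenced by a rule, creating it (and, inside an
 * anchor, its root counterpart in the main ruleset) if necessary, and
 * take a rule reference on it.  The first reference marks the table
 * PFR_TFLAG_REFERENCED.
 */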
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

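/*
 * Round-robin address selection for pools backed by a table.  Starting
 * from block *pidx, and from *counter when it still lies within that
 * block, return the block's address/mask pair and the concrete address
 * to use.  Nested blocks are stepped over by advancing the counter
 * past them.  Returns 0 on success, 1 when the table is exhausted and
 * -1 on error.
 */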
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	else
		return (-1); /* unsupported af, addr would be uninitialized */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is the supplied address within the block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

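/*
 * Return the idx-th address entry of the given address family, found
 * by walking the radix tree with the PFRW_POOL_GET walker, or NULL
 * when the table holds fewer entries.
 */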
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

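/*
 * Recompute the cached state of a dynamic address: walk both radix
 * trees at splsoftnet and let the PFRW_DYNADDR_UPDATE walker refresh
 * the address snapshot and the per-family counts.
 */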
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	splx(s);
}