/*	$OpenBSD: pf_table.c,v 1.42 2003/09/26 21:44:09 cedric Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip_ipsp.h>
#include <net/pfvar.h>

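/*
 * Every ioctl handler below starts with ACCEPT_FLAGS(), which rejects
 * the request with EINVAL if the caller passed any flag outside the
 * set that the handler supports.  The macro expects a local variable
 * named "flags".
 */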
#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
	}	 pfrw_1;
	int	 pfrw_free;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union  sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

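/*
 * Flush every address from a table.  The number of removed entries is
 * reported through *ndel; with PFR_FLAG_DUMMY only the count is
 * computed and the table is left untouched.
 */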
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	int			 s;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

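/*
 * Add a list of addresses supplied by userland.  New entries are first
 * routed into a scratch table (tmpkt) so that duplicates within the
 * input can be detected; with PFR_FLAG_FEEDBACK a per-address result
 * code is copied back into the user buffer.
 */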
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

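/*
 * Delete the listed addresses from a table.  pfr_mark_addrs() clears
 * all marks first; an entry already marked during this pass is
 * reported as a duplicate instead of being queued twice.
 */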
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

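/*
 * Replace the contents of a table with the given list: unknown
 * addresses are added, entries missing from the list are deleted and
 * entries whose negation flag differs are toggled.  With feedback
 * enabled and *size2 nonzero, the deleted addresses are appended to
 * the user buffer behind the input array.
 */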
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, s, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (copyout(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && *size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

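/*
 * Test which of the supplied host addresses match the table.  Network
 * prefixes are rejected with EINVAL; *nmatch receives the number of
 * non-negated matches.
 */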
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (copyout(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv, s;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, s, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (copyout(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

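/*
 * Sanity-check an address coming from userland: the prefix length
 * must fit the address family, all bits beyond the prefix must be
 * zero and the flag fields may not contain stray values.
 */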
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

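/*
 * Look up one address or prefix in the table's radix tree.  Prefixes
 * are found with rn_lookup() and an explicit netmask, host addresses
 * with rn_match(); with "exact" set, a covering network entry is not
 * accepted as a match for a host address.
 */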
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (copyout(&ad, addr+i, sizeof(ad)))
			break;
	}
}

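/*
 * Build a sockaddr-style netmask of "net" bits for the given address
 * family, suitable for handing to the radix tree code.
 */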
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

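/*
 * Insert a kernel entry into the radix head matching its address
 * family: host entries are added without a netmask, network entries
 * with the mask built by pfr_prepare_network().
 */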
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);
	splx(s);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

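/*
 * Radix tree iterator callback.  The operation in the pfr_walktree
 * cookie selects between clearing marks, sweeping unmarked entries
 * onto a work queue, copying addresses or statistics out to userland,
 * and fetching the n-th non-negated entry for the address pool code.
 */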
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (copyout(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, s, xadd = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (copyout(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		s = splsoftnet();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			s = splsoftnet();
		if (copyout(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			splx(s);
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		splx(s);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, s, xzero = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, s, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

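/*
 * The pfr_ina_*() functions implement the table transaction used when
 * a ruleset is loaded: pfr_ina_begin() discards stale inactive tables
 * and hands out a ticket, pfr_ina_define() builds shadow tables under
 * that ticket, and pfr_ina_commit() swaps the shadow contents into
 * place while pfr_ina_rollback() throws them away.
 */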
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (copyin(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 s, xadd = 0, xchange = 0;
	long			 tzero = time.tv_sec;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			s = splsoftnet();
		SLIST_FOREACH(p, &workq, pfrkt_workq)
			pfr_commit_ktable(p, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			splx(s);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

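/*
 * Merge a shadow table into its active counterpart.  If the active
 * table already holds addresses, the two sets are reconciled entry by
 * entry; otherwise the radix heads are simply swapped, making the
 * commit cheap for newly created tables.
 */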
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;
	struct pf_anchor *ac;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_ruleset[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor,
		    filter->pfrt_ruleset);
		return ((rs != NULL) ? rs->tables : -1);
	}
	if (filter->pfrt_anchor[0]) {
		ac = pf_find_anchor(filter->pfrt_anchor);
		return ((ac != NULL) ? ac->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE))
		return (1);
	if (!filter->pfrt_ruleset[0])
		return (0);
	if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_setflags_ktable(p, p->pfrkt_nflags);
}

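/*
 * Apply a new set of flags to a table.  A table that is neither
 * referenced nor persistent loses PFR_TFLAG_ACTIVE; once no flag in
 * PFR_TFLAG_SETMASK remains, the table is taken out of the tree and
 * destroyed, dropping its reference on the root table.
 */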
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE)))
		return (d);
	return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

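/*
 * Lookup used by the packet filter itself: returns 1 if the address
 * matches a non-negated entry, and updates the table's match/nomatch
 * counters.
 */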
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time.tv_sec, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

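/*
 * Walk a table as a round-robin address pool.  *pidx selects the
 * current block; "counter" carries the last address handed out so the
 * walk can resume inside a block.  Nested blocks are skipped by
 * advancing the counter past them.
 */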
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
	    (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		ke2 = (struct pfr_kentry *)(af == AF_INET ?
		    rn_match(&pfr_sin, kt->pfrkt_ip4) :
		    rn_match(&pfr_sin6, kt->pfrkt_ip6));
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
	default:
		return (NULL);
	}
}