xref: /netbsd-src/sys/dist/pf/net/pf_ioctl.c (revision aaf4ece63a859a04e37cf3a7229b5fab0157cc06)
1 /*	$NetBSD: pf_ioctl.c,v 1.21 2005/12/11 12:24:25 christos Exp $	*/
2 /*	$OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3 
4 /*
5  * Copyright (c) 2001 Daniel Hartmeier
6  * Copyright (c) 2002,2003 Henning Brauer
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  */
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_altq.h"
42 #include "opt_pfil_hooks.h"
43 #endif
44 
45 #ifdef __OpenBSD__
46 #include "pfsync.h"
47 #else
48 #define	NPFSYNC	0
49 #endif
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/kernel.h>
59 #include <sys/time.h>
60 #ifdef __OpenBSD__
61 #include <sys/timeout.h>
62 #else
63 #include <sys/callout.h>
64 #endif
65 #include <sys/pool.h>
66 #include <sys/malloc.h>
67 #ifdef __NetBSD__
68 #include <sys/conf.h>
69 #endif
70 
71 #include <net/if.h>
72 #include <net/if_types.h>
73 #include <net/route.h>
74 
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/ip.h>
79 #include <netinet/ip_var.h>
80 #include <netinet/ip_icmp.h>
81 
82 #ifdef __OpenBSD__
83 #include <dev/rndvar.h>
84 #endif
85 #include <net/pfvar.h>
86 
87 #if NPFSYNC > 0
88 #include <net/if_pfsync.h>
89 #endif /* NPFSYNC > 0 */
90 
91 #ifdef INET6
92 #include <netinet/ip6.h>
93 #include <netinet/in_pcb.h>
94 #endif /* INET6 */
95 
96 #ifdef ALTQ
97 #include <altq/altq.h>
98 #endif
99 
100 void			 pfattach(int);
101 #ifdef _LKM
102 void			 pfdetach(void);
103 #endif
104 int			 pfopen(dev_t, int, int, struct lwp *);
105 int			 pfclose(dev_t, int, int, struct lwp *);
106 struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
107 			    u_int8_t, u_int8_t, u_int8_t);
108 int			 pf_get_ruleset_number(u_int8_t);
109 void			 pf_init_ruleset(struct pf_ruleset *);
110 int			 pf_anchor_setup(struct pf_rule *,
111 			    const struct pf_ruleset *, const char *);
112 int			 pf_anchor_copyout(const struct pf_ruleset *,
113 			    const struct pf_rule *, struct pfioc_rule *);
114 void			 pf_anchor_remove(struct pf_rule *);
115 
116 void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
117 void			 pf_empty_pool(struct pf_palist *);
118 int			 pfioctl(dev_t, u_long, caddr_t, int, struct lwp *);
119 #ifdef ALTQ
120 int			 pf_begin_altq(u_int32_t *);
121 int			 pf_rollback_altq(u_int32_t);
122 int			 pf_commit_altq(u_int32_t);
123 int			 pf_enable_altq(struct pf_altq *);
124 int			 pf_disable_altq(struct pf_altq *);
125 #endif /* ALTQ */
126 int			 pf_begin_rules(u_int32_t *, int, const char *);
127 int			 pf_rollback_rules(u_int32_t, int, char *);
128 int			 pf_commit_rules(u_int32_t, int, char *);
129 
130 #ifdef __NetBSD__
131 const struct cdevsw pf_cdevsw = {
132 	pfopen, pfclose, noread, nowrite, pfioctl,
133 	nostop, notty, nopoll, nommap, nokqfilter,
134 };
135 
136 static int pf_pfil_attach(void);
137 static int pf_pfil_detach(void);
138 
139 static int pf_pfil_attached = 0;
140 #endif
141 
142 #ifdef __OpenBSD__
143 extern struct timeout	 pf_expire_to;
144 #else
145 extern struct callout	 pf_expire_to;
146 #endif
147 
148 struct pf_rule		 pf_default_rule;
149 #ifdef ALTQ
150 static int		 pf_altq_running;
151 #endif
152 
153 #define	TAGID_MAX	 50000
154 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
155 				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
156 
157 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
158 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
159 #endif
160 static u_int16_t	 tagname2tag(struct pf_tags *, char *);
161 static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
162 static void		 tag_unref(struct pf_tags *, u_int16_t);
163 int			 pf_rtlabel_add(struct pf_addr_wrap *);
164 void			 pf_rtlabel_remove(struct pf_addr_wrap *);
165 void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
166 
167 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
168 
169 #ifdef __NetBSD__
170 extern struct pfil_head if_pfil;
171 #endif
172 
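/*
 * pfattach: called at attach time.  Creates the pf memory pools, the main
 * ruleset, the default pass rule and the default state timeouts, and arms
 * the periodic state-expiry callout (a timeout(9) on OpenBSD).
 */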
173 void
174 pfattach(int num)
175 {
176 	u_int32_t *timeout = pf_default_rule.timeout;
177 
178 	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
179 	    &pool_allocator_nointr);
180 	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
181 	    "pfsrctrpl", NULL);
182 	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
183 	    NULL);
184 	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
185 	    &pool_allocator_nointr);
186 	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
187 	    "pfpooladdrpl", &pool_allocator_nointr);
188 	pfr_initialize();
189 	pfi_initialize();
190 	pf_osfp_initialize();
191 
192 	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
193 	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
194 
195 	RB_INIT(&tree_src_tracking);
196 	RB_INIT(&pf_anchors);
197 	pf_init_ruleset(&pf_main_ruleset);
198 	TAILQ_INIT(&pf_altqs[0]);
199 	TAILQ_INIT(&pf_altqs[1]);
200 	TAILQ_INIT(&pf_pabuf);
201 	pf_altqs_active = &pf_altqs[0];
202 	pf_altqs_inactive = &pf_altqs[1];
203 	TAILQ_INIT(&state_updates);
204 
205 	/* default rule should never be garbage collected */
206 	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
207 	pf_default_rule.action = PF_PASS;
208 	pf_default_rule.nr = -1;
209 
210 	/* initialize default timeouts */
211 	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
212 	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
213 	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
214 	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
215 	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
216 	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
217 	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
218 	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
219 	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
220 	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
221 	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
222 	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
223 	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
224 	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
225 	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
226 	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
227 	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
228 	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
229 
230 #ifdef __OpenBSD__
231 	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
232 	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
233 #else
234 	callout_init(&pf_expire_to);
235 	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
236 	    pf_purge_timeout, &pf_expire_to);
237 #endif
238 
239 	pf_normalize_init();
240 	bzero(&pf_status, sizeof(pf_status));
241 	pf_status.debug = PF_DEBUG_URGENT;
242 
243 	/* XXX do our best to avoid a conflict */
244 	pf_status.hostid = arc4random();
245 }
246 
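/*
 * pfdetach: LKM detach path.  Unhooks the pfil hooks, stops the expiry
 * callout, flushes rules, states, source nodes, tables and anchors, then
 * tears down the pools and the pfr/pfi/osfp/normalize subsystems.
 */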
247 #ifdef _LKM
248 void
249 pfdetach(void)
250 {
251 	struct pf_anchor	*anchor;
252 	struct pf_state		*state;
253 	struct pf_src_node	*node;
254 	struct pfioc_table	 pt;
255 	u_int32_t		 ticket;
256 	int			 i;
257 	char			 r = '\0';
258 
259 	(void)pf_pfil_detach();
260 
261 	callout_stop(&pf_expire_to);
262 	pf_status.running = 0;
263 
264 	/* clear the rulesets */
265 	for (i = 0; i < PF_RULESET_MAX; i++)
266 		if (pf_begin_rules(&ticket, i, &r) == 0)
267 			pf_commit_rules(ticket, i, &r);
268 #ifdef ALTQ
269 	if (pf_begin_altq(&ticket) == 0)
270 		pf_commit_altq(ticket);
271 #endif
272 
273 	/* clear states */
274 	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
275 		state->timeout = PFTM_PURGE;
276 #if NPFSYNC
277 		state->sync_flags = PFSTATE_NOSYNC;
278 #endif
279 	}
280 	pf_purge_expired_states();
281 #if NPFSYNC
282 	pfsync_clear_states(pf_status.hostid, NULL);
283 #endif
284 
285 	/* clear source nodes */
286 	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
287 		state->src_node = NULL;
288 		state->nat_src_node = NULL;
289 	}
290 	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
291 		node->expire = 1;
292 		node->states = 0;
293 	}
294 	pf_purge_expired_src_nodes();
295 
296 	/* clear tables */
297 	memset(&pt, '\0', sizeof(pt));
298 	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);
299 
300 	/* destroy anchors */
301 	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
302 		for (i = 0; i < PF_RULESET_MAX; i++)
303 			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
304 				pf_commit_rules(ticket, i, anchor->name);
305 	}
306 
307 	/* destroy main ruleset */
308 	pf_remove_if_empty_ruleset(&pf_main_ruleset);
309 
310 	/* destroy the pools */
311 	pool_destroy(&pf_pooladdr_pl);
312 	pool_destroy(&pf_altq_pl);
313 	pool_destroy(&pf_state_pl);
314 	pool_destroy(&pf_rule_pl);
315 	pool_destroy(&pf_src_tree_pl);
316 
317 	/* destroy subsystems */
318 	pf_normalize_destroy();
319 	pf_osfp_destroy();
320 	pfr_destroy();
321 	pfi_destroy();
322 }
323 #endif
324 
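/*
 * pfopen/pfclose: only minor device 0 exists.
 */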
325 int
326 pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
327 {
328 	if (minor(dev) >= 1)
329 		return (ENXIO);
330 	return (0);
331 }
332 
333 int
334 pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
335 {
336 	if (minor(dev) >= 1)
337 		return (ENXIO);
338 	return (0);
339 }
340 
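/*
 * pf_get_pool: look up the address pool of a single rule.  The rule is
 * selected by anchor, rule action (which picks the ruleset), rule number
 * (or the last rule if r_last is set) and, optionally, a ticket check,
 * from either the active or the inactive ruleset.
 */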
341 struct pf_pool *
342 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
343     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
344     u_int8_t check_ticket)
345 {
346 	struct pf_ruleset	*ruleset;
347 	struct pf_rule		*rule;
348 	int			 rs_num;
349 
350 	ruleset = pf_find_ruleset(anchor);
351 	if (ruleset == NULL)
352 		return (NULL);
353 	rs_num = pf_get_ruleset_number(rule_action);
354 	if (rs_num >= PF_RULESET_MAX)
355 		return (NULL);
356 	if (active) {
357 		if (check_ticket && ticket !=
358 		    ruleset->rules[rs_num].active.ticket)
359 			return (NULL);
360 		if (r_last)
361 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
362 			    pf_rulequeue);
363 		else
364 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
365 	} else {
366 		if (check_ticket && ticket !=
367 		    ruleset->rules[rs_num].inactive.ticket)
368 			return (NULL);
369 		if (r_last)
370 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
371 			    pf_rulequeue);
372 		else
373 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
374 	}
375 	if (!r_last) {
376 		while ((rule != NULL) && (rule->nr != rule_number))
377 			rule = TAILQ_NEXT(rule, entries);
378 	}
379 	if (rule == NULL)
380 		return (NULL);
381 
382 	return (&rule->rpool);
383 }
384 
385 int
386 pf_get_ruleset_number(u_int8_t action)
387 {
388 	switch (action) {
389 	case PF_SCRUB:
390 	case PF_NOSCRUB:
391 		return (PF_RULESET_SCRUB);
392 		break;
393 	case PF_PASS:
394 	case PF_DROP:
395 		return (PF_RULESET_FILTER);
396 		break;
397 	case PF_NAT:
398 	case PF_NONAT:
399 		return (PF_RULESET_NAT);
400 		break;
401 	case PF_BINAT:
402 	case PF_NOBINAT:
403 		return (PF_RULESET_BINAT);
404 		break;
405 	case PF_RDR:
406 	case PF_NORDR:
407 		return (PF_RULESET_RDR);
408 		break;
409 	default:
410 		return (PF_RULESET_MAX);
411 		break;
412 	}
413 }
414 
415 void
416 pf_init_ruleset(struct pf_ruleset *ruleset)
417 {
418 	int	i;
419 
420 	memset(ruleset, 0, sizeof(struct pf_ruleset));
421 	for (i = 0; i < PF_RULESET_MAX; i++) {
422 		TAILQ_INIT(&ruleset->rules[i].queues[0]);
423 		TAILQ_INIT(&ruleset->rules[i].queues[1]);
424 		ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
425 		ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
426 	}
427 }
428 
429 struct pf_anchor *
430 pf_find_anchor(const char *path)
431 {
432 	static struct pf_anchor	 key;
433 
434 	memset(&key, 0, sizeof(key));
435 	strlcpy(key.path, path, sizeof(key.path));
436 	return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
437 }
438 
439 struct pf_ruleset *
440 pf_find_ruleset(const char *path)
441 {
442 	struct pf_anchor	*anchor;
443 
444 	while (*path == '/')
445 		path++;
446 	if (!*path)
447 		return (&pf_main_ruleset);
448 	anchor = pf_find_anchor(path);
449 	if (anchor == NULL)
450 		return (NULL);
451 	else
452 		return (&anchor->ruleset);
453 }
454 
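/*
 * pf_find_or_create_ruleset: resolve an anchor path, creating any missing
 * anchors along the way and linking them into the global tree and their
 * parent's children tree.  Returns NULL on bad input or allocation failure.
 */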
455 struct pf_ruleset *
456 pf_find_or_create_ruleset(const char *path)
457 {
458 	static char		 p[MAXPATHLEN];
459 	char			*q = NULL /* XXX gcc */, *r;
460 	struct pf_ruleset	*ruleset;
461 	struct pf_anchor	*anchor = NULL /* XXX gcc */,
462 				*dup, *parent = NULL;
463 
464 	while (*path == '/')
465 		path++;
466 	ruleset = pf_find_ruleset(path);
467 	if (ruleset != NULL)
468 		return (ruleset);
469 	strlcpy(p, path, sizeof(p));
470 	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
471 		*q = 0;
472 		if ((ruleset = pf_find_ruleset(p)) != NULL) {
473 			parent = ruleset->anchor;
474 			break;
475 		}
476 	}
477 	if (q == NULL)
478 		q = p;
479 	else
480 		q++;
481 	strlcpy(p, path, sizeof(p));
482 	if (!*q)
483 		return (NULL);
484 	while ((r = strchr(q, '/')) != NULL || *q) {
485 		if (r != NULL)
486 			*r = 0;
487 		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
488 		    (parent != NULL && strlen(parent->path) >=
489 		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
490 			return (NULL);
491 		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
492 		    M_NOWAIT);
493 		if (anchor == NULL)
494 			return (NULL);
495 		memset(anchor, 0, sizeof(*anchor));
496 		RB_INIT(&anchor->children);
497 		strlcpy(anchor->name, q, sizeof(anchor->name));
498 		if (parent != NULL) {
499 			strlcpy(anchor->path, parent->path,
500 			    sizeof(anchor->path));
501 			strlcat(anchor->path, "/", sizeof(anchor->path));
502 		}
503 		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
504 		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
505 		    NULL) {
506 			printf("pf_find_or_create_ruleset: RB_INSERT1 "
507 			    "'%s' '%s' collides with '%s' '%s'\n",
508 			    anchor->path, anchor->name, dup->path, dup->name);
509 			free(anchor, M_TEMP);
510 			return (NULL);
511 		}
512 		if (parent != NULL) {
513 			anchor->parent = parent;
514 			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
515 			    anchor)) != NULL) {
516 				printf("pf_find_or_create_ruleset: "
517 				    "RB_INSERT2 '%s' '%s' collides with "
518 				    "'%s' '%s'\n", anchor->path, anchor->name,
519 				    dup->path, dup->name);
520 				RB_REMOVE(pf_anchor_global, &pf_anchors,
521 				    anchor);
522 				free(anchor, M_TEMP);
523 				return (NULL);
524 			}
525 		}
526 		pf_init_ruleset(&anchor->ruleset);
527 		anchor->ruleset.anchor = anchor;
528 		parent = anchor;
529 		if (r != NULL)
530 			q = r + 1;
531 		else
532 			*q = 0;
533 	}
534 	return (&anchor->ruleset);
535 }
536 
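/*
 * pf_remove_if_empty_ruleset: walk up from the given ruleset and free each
 * anchor that holds no rules, tables, child anchors or outstanding
 * references, stopping at the main ruleset or the first non-empty anchor.
 */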
537 void
538 pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
539 {
540 	struct pf_anchor	*parent;
541 	int			 i;
542 
543 	while (ruleset != NULL) {
544 		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
545 		    !RB_EMPTY(&ruleset->anchor->children) ||
546 		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
547 		    ruleset->topen)
548 			return;
549 		for (i = 0; i < PF_RULESET_MAX; ++i)
550 			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
551 			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
552 			    ruleset->rules[i].inactive.open)
553 				return;
554 		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
555 		if ((parent = ruleset->anchor->parent) != NULL)
556 			RB_REMOVE(pf_anchor_node, &parent->children,
557 			    ruleset->anchor);
558 		free(ruleset->anchor, M_TEMP);
559 		if (parent == NULL)
560 			return;
561 		ruleset = &parent->ruleset;
562 	}
563 }
564 
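/*
 * pf_anchor_setup: resolve the anchor call of a rule.  Absolute paths start
 * with '/'; relative paths are taken from the enclosing ruleset and may use
 * "../" components.  A trailing "/*" marks a wildcard call.  The resulting
 * anchor is attached to the rule and its reference count bumped.
 */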
565 int
566 pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
567     const char *name)
568 {
569 	static char		*p, path[MAXPATHLEN];
570 	struct pf_ruleset	*ruleset;
571 
572 	r->anchor = NULL;
573 	r->anchor_relative = 0;
574 	r->anchor_wildcard = 0;
575 	if (!name[0])
576 		return (0);
577 	if (name[0] == '/')
578 		strlcpy(path, name + 1, sizeof(path));
579 	else {
580 		/* relative path */
581 		r->anchor_relative = 1;
582 		if (s->anchor == NULL || !s->anchor->path[0])
583 			path[0] = 0;
584 		else
585 			strlcpy(path, s->anchor->path, sizeof(path));
586 		while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
587 			if (!path[0]) {
588 				printf("pf_anchor_setup: .. beyond root\n");
589 				return (1);
590 			}
591 			if ((p = strrchr(path, '/')) != NULL)
592 				*p = 0;
593 			else
594 				path[0] = 0;
595 			r->anchor_relative++;
596 			name += 3;
597 		}
598 		if (path[0])
599 			strlcat(path, "/", sizeof(path));
600 		strlcat(path, name, sizeof(path));
601 	}
602 	if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
603 		r->anchor_wildcard = 1;
604 		*p = 0;
605 	}
606 	ruleset = pf_find_or_create_ruleset(path);
607 	if (ruleset == NULL || ruleset->anchor == NULL) {
608 		printf("pf_anchor_setup: ruleset\n");
609 		return (1);
610 	}
611 	r->anchor = ruleset->anchor;
612 	r->anchor->refcnt++;
613 	return (0);
614 }
615 
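/*
 * pf_anchor_copyout: rebuild the anchor call string of a rule for userland.
 * Absolute calls become "/" followed by the anchor path; relative calls get
 * their "../" prefix and trailing wildcard reconstructed.
 */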
616 int
617 pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
618     struct pfioc_rule *pr)
619 {
620 	pr->anchor_call[0] = 0;
621 	if (r->anchor == NULL)
622 		return (0);
623 	if (!r->anchor_relative) {
624 		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
625 		strlcat(pr->anchor_call, r->anchor->path,
626 		    sizeof(pr->anchor_call));
627 	} else {
628 		char a[MAXPATHLEN], b[MAXPATHLEN], *p;
629 		int i;
630 
631 		if (rs->anchor == NULL)
632 			a[0] = 0;
633 		else
634 			strlcpy(a, rs->anchor->path, sizeof(a));
635 		strlcpy(b, r->anchor->path, sizeof(b));
636 		for (i = 1; i < r->anchor_relative; ++i) {
637 			if ((p = strrchr(a, '/')) == NULL)
638 				p = a;
639 			*p = 0;
640 			strlcat(pr->anchor_call, "../",
641 			    sizeof(pr->anchor_call));
642 		}
643 		if (strncmp(a, b, strlen(a))) {
644 			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
645 			return (1);
646 		}
647 		if (strlen(b) > strlen(a))
648 			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
649 			    sizeof(pr->anchor_call));
650 	}
651 	if (r->anchor_wildcard)
652 		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
653 		    sizeof(pr->anchor_call));
654 	return (0);
655 }
656 
657 void
658 pf_anchor_remove(struct pf_rule *r)
659 {
660 	if (r->anchor == NULL)
661 		return;
662 	if (r->anchor->refcnt <= 0) {
663 		printf("pf_anchor_remove: broken refcount\n");
664 		r->anchor = NULL;
665 		return;
666 	}
667 	if (!--r->anchor->refcnt)
668 		pf_remove_if_empty_ruleset(&r->anchor->ruleset);
669 	r->anchor = NULL;
670 }
671 
672 void
673 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
674 {
675 	struct pf_pooladdr	*mv_pool_pa;
676 
677 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
678 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
679 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
680 	}
681 }
682 
683 void
684 pf_empty_pool(struct pf_palist *poola)
685 {
686 	struct pf_pooladdr	*empty_pool_pa;
687 
688 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
689 		pfi_dynaddr_remove(&empty_pool_pa->addr);
690 		pf_tbladdr_remove(&empty_pool_pa->addr);
691 		pfi_detach_rule(empty_pool_pa->kif);
692 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
693 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
694 	}
695 }
696 
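/*
 * pf_rm_rule: unlink a rule from its queue (if any) and, once no states or
 * source nodes reference it, release its tags, queue ids, route labels,
 * dynamic and table addresses, interface and address pool, then return it
 * to the rule pool.
 */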
697 void
698 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
699 {
700 	if (rulequeue != NULL) {
701 		if (rule->states <= 0) {
702 			/*
703 			 * XXX - we need to remove the table *before* detaching
704 			 * the rule to make sure the table code does not delete
705 			 * the anchor under our feet.
706 			 */
707 			pf_tbladdr_remove(&rule->src.addr);
708 			pf_tbladdr_remove(&rule->dst.addr);
709 			if (rule->overload_tbl)
710 				pfr_detach_table(rule->overload_tbl);
711 		}
712 		TAILQ_REMOVE(rulequeue, rule, entries);
713 		rule->entries.tqe_prev = NULL;
714 		rule->nr = -1;
715 	}
716 
717 	if (rule->states > 0 || rule->src_nodes > 0 ||
718 	    rule->entries.tqe_prev != NULL)
719 		return;
720 	pf_tag_unref(rule->tag);
721 	pf_tag_unref(rule->match_tag);
722 #ifdef ALTQ
723 	if (rule->pqid != rule->qid)
724 		pf_qid_unref(rule->pqid);
725 	pf_qid_unref(rule->qid);
726 #endif
727 	pf_rtlabel_remove(&rule->src.addr);
728 	pf_rtlabel_remove(&rule->dst.addr);
729 	pfi_dynaddr_remove(&rule->src.addr);
730 	pfi_dynaddr_remove(&rule->dst.addr);
731 	if (rulequeue == NULL) {
732 		pf_tbladdr_remove(&rule->src.addr);
733 		pf_tbladdr_remove(&rule->dst.addr);
734 		if (rule->overload_tbl)
735 			pfr_detach_table(rule->overload_tbl);
736 	}
737 	pfi_detach_rule(rule->kif);
738 	pf_anchor_remove(rule);
739 	pf_empty_pool(&rule->rpool.list);
740 	pool_put(&pf_rule_pl, rule);
741 }
742 
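/*
 * tagname2tag: return the numeric id for a tag name, allocating the lowest
 * free id (up to TAGID_MAX) for names not seen before and bumping the
 * reference count of existing entries.
 */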
743 static	u_int16_t
744 tagname2tag(struct pf_tags *head, char *tagname)
745 {
746 	struct pf_tagname	*tag, *p = NULL;
747 	u_int16_t		 new_tagid = 1;
748 
749 	TAILQ_FOREACH(tag, head, entries)
750 		if (strcmp(tagname, tag->name) == 0) {
751 			tag->ref++;
752 			return (tag->tag);
753 		}
754 
755 	/*
756 	 * to avoid fragmentation, we do a linear search from the beginning
757 	 * and take the first free slot we find. if there is none or the list
758 	 * is empty, append a new entry at the end.
759 	 */
760 
761 	/* new entry */
762 	if (!TAILQ_EMPTY(head))
763 		for (p = TAILQ_FIRST(head); p != NULL &&
764 		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
765 			new_tagid = p->tag + 1;
766 
767 	if (new_tagid > TAGID_MAX)
768 		return (0);
769 
770 	/* allocate and fill new struct pf_tagname */
771 	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
772 	    M_TEMP, M_NOWAIT);
773 	if (tag == NULL)
774 		return (0);
775 	bzero(tag, sizeof(struct pf_tagname));
776 	strlcpy(tag->name, tagname, sizeof(tag->name));
777 	tag->tag = new_tagid;
778 	tag->ref++;
779 
780 	if (p != NULL)	/* insert new entry before p */
781 		TAILQ_INSERT_BEFORE(p, tag, entries);
782 	else	/* either list empty or no free slot in between */
783 		TAILQ_INSERT_TAIL(head, tag, entries);
784 
785 	return (tag->tag);
786 }
787 
788 static	void
789 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
790 {
791 	struct pf_tagname	*tag;
792 
793 	TAILQ_FOREACH(tag, head, entries)
794 		if (tag->tag == tagid) {
795 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
796 			return;
797 		}
798 }
799 
800 static	void
801 tag_unref(struct pf_tags *head, u_int16_t tag)
802 {
803 	struct pf_tagname	*p, *next;
804 
805 	if (tag == 0)
806 		return;
807 
808 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
809 		next = TAILQ_NEXT(p, entries);
810 		if (tag == p->tag) {
811 			if (--p->ref == 0) {
812 				TAILQ_REMOVE(head, p, entries);
813 				free(p, M_TEMP);
814 			}
815 			break;
816 		}
817 	}
818 }
819 
820 u_int16_t
821 pf_tagname2tag(char *tagname)
822 {
823 	return (tagname2tag(&pf_tags, tagname));
824 }
825 
826 void
827 pf_tag2tagname(u_int16_t tagid, char *p)
828 {
829 	return (tag2tagname(&pf_tags, tagid, p));
830 }
831 
832 void
833 pf_tag_ref(u_int16_t tag)
834 {
835 	struct pf_tagname *t;
836 
837 	TAILQ_FOREACH(t, &pf_tags, entries)
838 		if (t->tag == tag)
839 			break;
840 	if (t != NULL)
841 		t->ref++;
842 }
843 
844 void
845 pf_tag_unref(u_int16_t tag)
846 {
847 	return (tag_unref(&pf_tags, tag));
848 }
849 
850 int
851 pf_rtlabel_add(struct pf_addr_wrap *a)
852 {
853 #ifdef __OpenBSD__
854 	if (a->type == PF_ADDR_RTLABEL &&
855 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
856 		return (-1);
857 #endif
858 	return (0);
859 }
860 
861 void
862 pf_rtlabel_remove(struct pf_addr_wrap *a)
863 {
864 #ifdef __OpenBSD__
865 	if (a->type == PF_ADDR_RTLABEL)
866 		rtlabel_unref(a->v.rtlabel);
867 #endif
868 }
869 
870 void
871 pf_rtlabel_copyout(struct pf_addr_wrap *a)
872 {
873 #ifdef __OpenBSD__
874 	const char	*name;
875 
876 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
877 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
878 			strlcpy(a->v.rtlabelname, "?",
879 			    sizeof(a->v.rtlabelname));
880 		else
881 			strlcpy(a->v.rtlabelname, name,
882 			    sizeof(a->v.rtlabelname));
883 	}
884 #endif
885 }
886 
887 #ifdef ALTQ
888 u_int32_t
889 pf_qname2qid(char *qname)
890 {
891 	return ((u_int32_t)tagname2tag(&pf_qids, qname));
892 }
893 
894 void
895 pf_qid2qname(u_int32_t qid, char *p)
896 {
897 	return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
898 }
899 
900 void
901 pf_qid_unref(u_int32_t qid)
902 {
903 	return (tag_unref(&pf_qids, (u_int16_t)qid));
904 }
905 
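/*
 * pf_begin_altq / pf_rollback_altq / pf_commit_altq implement the
 * ticket-based transaction for the ALTQ list: begin purges whatever is
 * left on the inactive list and issues a new ticket, commit swaps the
 * active and inactive lists at splsoftnet and attaches the new
 * disciplines, rollback throws the inactive list away.
 */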
906 int
907 pf_begin_altq(u_int32_t *ticket)
908 {
909 	struct pf_altq	*altq;
910 	int		 error = 0;
911 
912 	/* Purge the old altq list */
913 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
914 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
915 		if (altq->qname[0] == 0) {
916 			/* detach and destroy the discipline */
917 			error = altq_remove(altq);
918 		} else
919 			pf_qid_unref(altq->qid);
920 		pool_put(&pf_altq_pl, altq);
921 	}
922 	if (error)
923 		return (error);
924 	*ticket = ++ticket_altqs_inactive;
925 	altqs_inactive_open = 1;
926 	return (0);
927 }
928 
929 int
930 pf_rollback_altq(u_int32_t ticket)
931 {
932 	struct pf_altq	*altq;
933 	int		 error = 0;
934 
935 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
936 		return (0);
937 	/* Purge the old altq list */
938 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
939 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
940 		if (altq->qname[0] == 0) {
941 			/* detach and destroy the discipline */
942 			error = altq_remove(altq);
943 		} else
944 			pf_qid_unref(altq->qid);
945 		pool_put(&pf_altq_pl, altq);
946 	}
947 	altqs_inactive_open = 0;
948 	return (error);
949 }
950 
951 int
952 pf_commit_altq(u_int32_t ticket)
953 {
954 	struct pf_altqqueue	*old_altqs;
955 	struct pf_altq		*altq;
956 	int			 s, err, error = 0;
957 
958 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
959 		return (EBUSY);
960 
961 	/* swap altqs, keep the old. */
962 	s = splsoftnet();
963 	old_altqs = pf_altqs_active;
964 	pf_altqs_active = pf_altqs_inactive;
965 	pf_altqs_inactive = old_altqs;
966 	ticket_altqs_active = ticket_altqs_inactive;
967 
968 	/* Attach new disciplines */
969 	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
970 		if (altq->qname[0] == 0) {
971 			/* attach the discipline */
972 			error = altq_pfattach(altq);
973 			if (error == 0 && pf_altq_running)
974 				error = pf_enable_altq(altq);
975 			if (error != 0) {
976 				splx(s);
977 				return (error);
978 			}
979 		}
980 	}
981 
982 	/* Purge the old altq list */
983 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
984 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
985 		if (altq->qname[0] == 0) {
986 			/* detach and destroy the discipline */
987 			if (pf_altq_running)
988 				error = pf_disable_altq(altq);
989 			err = altq_pfdetach(altq);
990 			if (err != 0 && error == 0)
991 				error = err;
992 			err = altq_remove(altq);
993 			if (err != 0 && error == 0)
994 				error = err;
995 		} else
996 			pf_qid_unref(altq->qid);
997 		pool_put(&pf_altq_pl, altq);
998 	}
999 	splx(s);
1000 
1001 	altqs_inactive_open = 0;
1002 	return (error);
1003 }
1004 
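/*
 * pf_enable_altq / pf_disable_altq: turn queueing on or off on the altq's
 * interface and set or clear its token bucket regulator accordingly.
 */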
1005 int
1006 pf_enable_altq(struct pf_altq *altq)
1007 {
1008 	struct ifnet		*ifp;
1009 	struct tb_profile	 tb;
1010 	int			 s, error = 0;
1011 
1012 	if ((ifp = ifunit(altq->ifname)) == NULL)
1013 		return (EINVAL);
1014 
1015 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1016 		error = altq_enable(&ifp->if_snd);
1017 
1018 	/* set tokenbucket regulator */
1019 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1020 		tb.rate = altq->ifbandwidth;
1021 		tb.depth = altq->tbrsize;
1022 		s = splimp();
1023 		error = tbr_set(&ifp->if_snd, &tb);
1024 		splx(s);
1025 	}
1026 
1027 	return (error);
1028 }
1029 
1030 int
1031 pf_disable_altq(struct pf_altq *altq)
1032 {
1033 	struct ifnet		*ifp;
1034 	struct tb_profile	 tb;
1035 	int			 s, error;
1036 
1037 	if ((ifp = ifunit(altq->ifname)) == NULL)
1038 		return (EINVAL);
1039 
1040 	/*
1041 	 * If the interface's discipline is no longer the one referenced
1042 	 * here, it has been overridden by a new one; just return.
1043 	 */
1044 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1045 		return (0);
1046 
1047 	error = altq_disable(&ifp->if_snd);
1048 
1049 	if (error == 0) {
1050 		/* clear tokenbucket regulator */
1051 		tb.rate = 0;
1052 		s = splimp();
1053 		error = tbr_set(&ifp->if_snd, &tb);
1054 		splx(s);
1055 	}
1056 
1057 	return (error);
1058 }
1059 #endif /* ALTQ */
1060 
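/*
 * pf_begin_rules / pf_rollback_rules / pf_commit_rules implement the
 * ticket-based transaction used to replace a ruleset: begin empties the
 * inactive queue and hands out a ticket, commit swaps the inactive and
 * active queues at splsoftnet and purges the old rules, rollback discards
 * the inactive queue.
 */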
1061 int
1062 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1063 {
1064 	struct pf_ruleset	*rs;
1065 	struct pf_rule		*rule;
1066 
1067 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1068 		return (EINVAL);
1069 	rs = pf_find_or_create_ruleset(anchor);
1070 	if (rs == NULL)
1071 		return (EINVAL);
1072 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1073 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1074 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1075 	rs->rules[rs_num].inactive.open = 1;
1076 	return (0);
1077 }
1078 
1079 int
1080 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1081 {
1082 	struct pf_ruleset	*rs;
1083 	struct pf_rule		*rule;
1084 
1085 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1086 		return (EINVAL);
1087 	rs = pf_find_ruleset(anchor);
1088 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1089 	    rs->rules[rs_num].inactive.ticket != ticket)
1090 		return (0);
1091 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1092 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1093 	rs->rules[rs_num].inactive.open = 0;
1094 	return (0);
1095 }
1096 
1097 int
1098 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1099 {
1100 	struct pf_ruleset	*rs;
1101 	struct pf_rule		*rule;
1102 	struct pf_rulequeue	*old_rules;
1103 	int			 s;
1104 
1105 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1106 		return (EINVAL);
1107 	rs = pf_find_ruleset(anchor);
1108 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1109 	    ticket != rs->rules[rs_num].inactive.ticket)
1110 		return (EBUSY);
1111 
1112 	/* Swap rules, keep the old. */
1113 	s = splsoftnet();
1114 	old_rules = rs->rules[rs_num].active.ptr;
1115 	rs->rules[rs_num].active.ptr =
1116 	    rs->rules[rs_num].inactive.ptr;
1117 	rs->rules[rs_num].inactive.ptr = old_rules;
1118 	rs->rules[rs_num].active.ticket =
1119 	    rs->rules[rs_num].inactive.ticket;
1120 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1121 
1122 	/* Purge the old rule list. */
1123 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1124 		pf_rm_rule(old_rules, rule);
1125 	rs->rules[rs_num].inactive.open = 0;
1126 	pf_remove_if_empty_ruleset(rs);
1127 	splx(s);
1128 	return (0);
1129 }
1130 
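/*
 * pfioctl: ioctl handler for /dev/pf.  Commands are first screened against
 * securelevel and the descriptor's FWRITE flag (table commands with
 * PFR_FLAG_DUMMY set are exempt from both checks), then dispatched at
 * splsoftnet.
 */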
1131 int
1132 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct lwp *l)
1133 {
1134 	struct pf_pooladdr	*pa = NULL;
1135 	struct pf_pool		*pool = NULL;
1136 	int			 s;
1137 	int			 error = 0;
1138 
1139 	/* XXX keep in sync with switch() below */
1140 	if (securelevel > 1)
1141 		switch (cmd) {
1142 		case DIOCGETRULES:
1143 		case DIOCGETRULE:
1144 		case DIOCGETADDRS:
1145 		case DIOCGETADDR:
1146 		case DIOCGETSTATE:
1147 		case DIOCSETSTATUSIF:
1148 		case DIOCGETSTATUS:
1149 		case DIOCCLRSTATUS:
1150 		case DIOCNATLOOK:
1151 		case DIOCSETDEBUG:
1152 		case DIOCGETSTATES:
1153 		case DIOCGETTIMEOUT:
1154 		case DIOCCLRRULECTRS:
1155 		case DIOCGETLIMIT:
1156 		case DIOCGETALTQS:
1157 		case DIOCGETALTQ:
1158 		case DIOCGETQSTATS:
1159 		case DIOCGETRULESETS:
1160 		case DIOCGETRULESET:
1161 		case DIOCRGETTABLES:
1162 		case DIOCRGETTSTATS:
1163 		case DIOCRCLRTSTATS:
1164 		case DIOCRCLRADDRS:
1165 		case DIOCRADDADDRS:
1166 		case DIOCRDELADDRS:
1167 		case DIOCRSETADDRS:
1168 		case DIOCRGETADDRS:
1169 		case DIOCRGETASTATS:
1170 		case DIOCRCLRASTATS:
1171 		case DIOCRTSTADDRS:
1172 		case DIOCOSFPGET:
1173 		case DIOCGETSRCNODES:
1174 		case DIOCCLRSRCNODES:
1175 		case DIOCIGETIFACES:
1176 		case DIOCICLRISTATS:
1177 		case DIOCSETIFFLAG:
1178 		case DIOCCLRIFFLAG:
1179 			break;
1180 		case DIOCRCLRTABLES:
1181 		case DIOCRADDTABLES:
1182 		case DIOCRDELTABLES:
1183 		case DIOCRSETTFLAGS:
1184 			if (((struct pfioc_table *)addr)->pfrio_flags &
1185 			    PFR_FLAG_DUMMY)
1186 				break; /* dummy operation ok */
1187 			return (EPERM);
1188 		default:
1189 			return (EPERM);
1190 		}
1191 
1192 	if (!(flags & FWRITE))
1193 		switch (cmd) {
1194 		case DIOCGETRULES:
1195 		case DIOCGETRULE:
1196 		case DIOCGETADDRS:
1197 		case DIOCGETADDR:
1198 		case DIOCGETSTATE:
1199 		case DIOCGETSTATUS:
1200 		case DIOCGETSTATES:
1201 		case DIOCGETTIMEOUT:
1202 		case DIOCGETLIMIT:
1203 		case DIOCGETALTQS:
1204 		case DIOCGETALTQ:
1205 		case DIOCGETQSTATS:
1206 		case DIOCGETRULESETS:
1207 		case DIOCGETRULESET:
1208 		case DIOCRGETTABLES:
1209 		case DIOCRGETTSTATS:
1210 		case DIOCRGETADDRS:
1211 		case DIOCRGETASTATS:
1212 		case DIOCRTSTADDRS:
1213 		case DIOCOSFPGET:
1214 		case DIOCGETSRCNODES:
1215 		case DIOCIGETIFACES:
1216 			break;
1217 		case DIOCRCLRTABLES:
1218 		case DIOCRADDTABLES:
1219 		case DIOCRDELTABLES:
1220 		case DIOCRCLRTSTATS:
1221 		case DIOCRCLRADDRS:
1222 		case DIOCRADDADDRS:
1223 		case DIOCRDELADDRS:
1224 		case DIOCRSETADDRS:
1225 		case DIOCRSETTFLAGS:
1226 			if (((struct pfioc_table *)addr)->pfrio_flags &
1227 			    PFR_FLAG_DUMMY)
1228 				break; /* dummy operation ok */
1229 			return (EACCES);
1230 		default:
1231 			return (EACCES);
1232 		}
1233 
1234 	s = splsoftnet();
1235 	switch (cmd) {
1236 
1237 	case DIOCSTART:
1238 		if (pf_status.running)
1239 			error = EEXIST;
1240 		else {
1241 #ifdef __NetBSD__
1242 			error = pf_pfil_attach();
1243 			if (error)
1244 				break;
1245 #endif
1246 			pf_status.running = 1;
1247 			pf_status.since = time_second;
1248 			if (pf_status.stateid == 0) {
1249 				pf_status.stateid = time_second;
1250 				pf_status.stateid = pf_status.stateid << 32;
1251 			}
1252 			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1253 		}
1254 		break;
1255 
1256 	case DIOCSTOP:
1257 		if (!pf_status.running)
1258 			error = ENOENT;
1259 		else {
1260 #ifdef __NetBSD__
1261 			error = pf_pfil_detach();
1262 			if (error)
1263 				break;
1264 #endif
1265 			pf_status.running = 0;
1266 			pf_status.since = time_second;
1267 			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1268 		}
1269 		break;
1270 
1271 	case DIOCADDRULE: {
1272 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1273 		struct pf_ruleset	*ruleset;
1274 		struct pf_rule		*rule, *tail;
1275 		struct pf_pooladdr	*pa;
1276 		int			 rs_num;
1277 
1278 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1279 		ruleset = pf_find_ruleset(pr->anchor);
1280 		if (ruleset == NULL) {
1281 			error = EINVAL;
1282 			break;
1283 		}
1284 		rs_num = pf_get_ruleset_number(pr->rule.action);
1285 		if (rs_num >= PF_RULESET_MAX) {
1286 			error = EINVAL;
1287 			break;
1288 		}
1289 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1290 			error = EINVAL;
1291 			break;
1292 		}
1293 		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1294 			error = EBUSY;
1295 			break;
1296 		}
1297 		if (pr->pool_ticket != ticket_pabuf) {
1298 			error = EBUSY;
1299 			break;
1300 		}
1301 		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1302 		if (rule == NULL) {
1303 			error = ENOMEM;
1304 			break;
1305 		}
1306 		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1307 		rule->anchor = NULL;
1308 		rule->kif = NULL;
1309 		TAILQ_INIT(&rule->rpool.list);
1310 		/* initialize refcounting */
1311 		rule->states = 0;
1312 		rule->src_nodes = 0;
1313 		rule->entries.tqe_prev = NULL;
1314 #ifndef INET
1315 		if (rule->af == AF_INET) {
1316 			pool_put(&pf_rule_pl, rule);
1317 			error = EAFNOSUPPORT;
1318 			break;
1319 		}
1320 #endif /* INET */
1321 #ifndef INET6
1322 		if (rule->af == AF_INET6) {
1323 			pool_put(&pf_rule_pl, rule);
1324 			error = EAFNOSUPPORT;
1325 			break;
1326 		}
1327 #endif /* INET6 */
1328 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1329 		    pf_rulequeue);
1330 		if (tail)
1331 			rule->nr = tail->nr + 1;
1332 		else
1333 			rule->nr = 0;
1334 		if (rule->ifname[0]) {
1335 			rule->kif = pfi_attach_rule(rule->ifname);
1336 			if (rule->kif == NULL) {
1337 				pool_put(&pf_rule_pl, rule);
1338 				error = EINVAL;
1339 				break;
1340 			}
1341 		}
1342 
1343 #ifdef ALTQ
1344 		/* set queue IDs */
1345 		if (rule->qname[0] != 0) {
1346 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1347 				error = EBUSY;
1348 			else if (rule->pqname[0] != 0) {
1349 				if ((rule->pqid =
1350 				    pf_qname2qid(rule->pqname)) == 0)
1351 					error = EBUSY;
1352 			} else
1353 				rule->pqid = rule->qid;
1354 		}
1355 #endif
1356 		if (rule->tagname[0])
1357 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1358 				error = EBUSY;
1359 		if (rule->match_tagname[0])
1360 			if ((rule->match_tag =
1361 			    pf_tagname2tag(rule->match_tagname)) == 0)
1362 				error = EBUSY;
1363 		if (rule->rt && !rule->direction)
1364 			error = EINVAL;
1365 		if (pf_rtlabel_add(&rule->src.addr) ||
1366 		    pf_rtlabel_add(&rule->dst.addr))
1367 			error = EBUSY;
1368 		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1369 			error = EINVAL;
1370 		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1371 			error = EINVAL;
1372 		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1373 			error = EINVAL;
1374 		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1375 			error = EINVAL;
1376 		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1377 			error = EINVAL;
1378 		TAILQ_FOREACH(pa, &pf_pabuf, entries)
1379 			if (pf_tbladdr_setup(ruleset, &pa->addr))
1380 				error = EINVAL;
1381 
1382 		if (rule->overload_tblname[0]) {
1383 			if ((rule->overload_tbl = pfr_attach_table(ruleset,
1384 			    rule->overload_tblname)) == NULL)
1385 				error = EINVAL;
1386 			else
1387 				rule->overload_tbl->pfrkt_flags |=
1388 				    PFR_TFLAG_ACTIVE;
1389 		}
1390 
1391 		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1392 		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1393 		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1394 		    (rule->rt > PF_FASTROUTE)) &&
1395 		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
1396 			error = EINVAL;
1397 
1398 		if (error) {
1399 			pf_rm_rule(NULL, rule);
1400 			break;
1401 		}
1402 		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1403 		rule->evaluations = rule->packets = rule->bytes = 0;
1404 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1405 		    rule, entries);
1406 		break;
1407 	}
1408 
1409 	case DIOCGETRULES: {
1410 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1411 		struct pf_ruleset	*ruleset;
1412 		struct pf_rule		*tail;
1413 		int			 rs_num;
1414 
1415 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1416 		ruleset = pf_find_ruleset(pr->anchor);
1417 		if (ruleset == NULL) {
1418 			error = EINVAL;
1419 			break;
1420 		}
1421 		rs_num = pf_get_ruleset_number(pr->rule.action);
1422 		if (rs_num >= PF_RULESET_MAX) {
1423 			error = EINVAL;
1424 			break;
1425 		}
1426 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1427 		    pf_rulequeue);
1428 		if (tail)
1429 			pr->nr = tail->nr + 1;
1430 		else
1431 			pr->nr = 0;
1432 		pr->ticket = ruleset->rules[rs_num].active.ticket;
1433 		break;
1434 	}
1435 
1436 	case DIOCGETRULE: {
1437 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1438 		struct pf_ruleset	*ruleset;
1439 		struct pf_rule		*rule;
1440 		int			 rs_num, i;
1441 
1442 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1443 		ruleset = pf_find_ruleset(pr->anchor);
1444 		if (ruleset == NULL) {
1445 			error = EINVAL;
1446 			break;
1447 		}
1448 		rs_num = pf_get_ruleset_number(pr->rule.action);
1449 		if (rs_num >= PF_RULESET_MAX) {
1450 			error = EINVAL;
1451 			break;
1452 		}
1453 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1454 			error = EBUSY;
1455 			break;
1456 		}
1457 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1458 		while ((rule != NULL) && (rule->nr != pr->nr))
1459 			rule = TAILQ_NEXT(rule, entries);
1460 		if (rule == NULL) {
1461 			error = EBUSY;
1462 			break;
1463 		}
1464 		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1465 		if (pf_anchor_copyout(ruleset, rule, pr)) {
1466 			error = EBUSY;
1467 			break;
1468 		}
1469 		pfi_dynaddr_copyout(&pr->rule.src.addr);
1470 		pfi_dynaddr_copyout(&pr->rule.dst.addr);
1471 		pf_tbladdr_copyout(&pr->rule.src.addr);
1472 		pf_tbladdr_copyout(&pr->rule.dst.addr);
1473 		pf_rtlabel_copyout(&pr->rule.src.addr);
1474 		pf_rtlabel_copyout(&pr->rule.dst.addr);
1475 		for (i = 0; i < PF_SKIP_COUNT; ++i)
1476 			if (rule->skip[i].ptr == NULL)
1477 				pr->rule.skip[i].nr = -1;
1478 			else
1479 				pr->rule.skip[i].nr =
1480 				    rule->skip[i].ptr->nr;
1481 		break;
1482 	}
1483 
1484 	case DIOCCHANGERULE: {
1485 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1486 		struct pf_ruleset	*ruleset;
1487 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1488 		u_int32_t		 nr = 0;
1489 		int			 rs_num;
1490 
1491 		if (!(pcr->action == PF_CHANGE_REMOVE ||
1492 		    pcr->action == PF_CHANGE_GET_TICKET) &&
1493 		    pcr->pool_ticket != ticket_pabuf) {
1494 			error = EBUSY;
1495 			break;
1496 		}
1497 
1498 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1499 		    pcr->action > PF_CHANGE_GET_TICKET) {
1500 			error = EINVAL;
1501 			break;
1502 		}
1503 		ruleset = pf_find_ruleset(pcr->anchor);
1504 		if (ruleset == NULL) {
1505 			error = EINVAL;
1506 			break;
1507 		}
1508 		rs_num = pf_get_ruleset_number(pcr->rule.action);
1509 		if (rs_num >= PF_RULESET_MAX) {
1510 			error = EINVAL;
1511 			break;
1512 		}
1513 
1514 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1515 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1516 			break;
1517 		} else {
1518 			if (pcr->ticket !=
1519 			    ruleset->rules[rs_num].active.ticket) {
1520 				error = EINVAL;
1521 				break;
1522 			}
1523 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1524 				error = EINVAL;
1525 				break;
1526 			}
1527 		}
1528 
1529 		if (pcr->action != PF_CHANGE_REMOVE) {
1530 			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1531 			if (newrule == NULL) {
1532 				error = ENOMEM;
1533 				break;
1534 			}
1535 			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1536 			TAILQ_INIT(&newrule->rpool.list);
1537 			/* initialize refcounting */
1538 			newrule->states = 0;
1539 			newrule->entries.tqe_prev = NULL;
1540 #ifndef INET
1541 			if (newrule->af == AF_INET) {
1542 				pool_put(&pf_rule_pl, newrule);
1543 				error = EAFNOSUPPORT;
1544 				break;
1545 			}
1546 #endif /* INET */
1547 #ifndef INET6
1548 			if (newrule->af == AF_INET6) {
1549 				pool_put(&pf_rule_pl, newrule);
1550 				error = EAFNOSUPPORT;
1551 				break;
1552 			}
1553 #endif /* INET6 */
1554 			if (newrule->ifname[0]) {
1555 				newrule->kif = pfi_attach_rule(newrule->ifname);
1556 				if (newrule->kif == NULL) {
1557 					pool_put(&pf_rule_pl, newrule);
1558 					error = EINVAL;
1559 					break;
1560 				}
1561 			} else
1562 				newrule->kif = NULL;
1563 
1564 #ifdef ALTQ
1565 			/* set queue IDs */
1566 			if (newrule->qname[0] != 0) {
1567 				if ((newrule->qid =
1568 				    pf_qname2qid(newrule->qname)) == 0)
1569 					error = EBUSY;
1570 				else if (newrule->pqname[0] != 0) {
1571 					if ((newrule->pqid =
1572 					    pf_qname2qid(newrule->pqname)) == 0)
1573 						error = EBUSY;
1574 				} else
1575 					newrule->pqid = newrule->qid;
1576 			}
1577 #endif /* ALTQ */
1578 			if (newrule->tagname[0])
1579 				if ((newrule->tag =
1580 				    pf_tagname2tag(newrule->tagname)) == 0)
1581 					error = EBUSY;
1582 			if (newrule->match_tagname[0])
1583 				if ((newrule->match_tag = pf_tagname2tag(
1584 				    newrule->match_tagname)) == 0)
1585 					error = EBUSY;
1586 			if (newrule->rt && !newrule->direction)
1587 				error = EINVAL;
1588 			if (pf_rtlabel_add(&newrule->src.addr) ||
1589 			    pf_rtlabel_add(&newrule->dst.addr))
1590 				error = EBUSY;
1591 			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1592 				error = EINVAL;
1593 			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1594 				error = EINVAL;
1595 			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1596 				error = EINVAL;
1597 			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1598 				error = EINVAL;
1599 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1600 				error = EINVAL;
1601 
1602 			if (newrule->overload_tblname[0]) {
1603 				if ((newrule->overload_tbl = pfr_attach_table(
1604 				    ruleset, newrule->overload_tblname)) ==
1605 				    NULL)
1606 					error = EINVAL;
1607 				else
1608 					newrule->overload_tbl->pfrkt_flags |=
1609 					    PFR_TFLAG_ACTIVE;
1610 			}
1611 
1612 			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1613 			if (((((newrule->action == PF_NAT) ||
1614 			    (newrule->action == PF_RDR) ||
1615 			    (newrule->action == PF_BINAT) ||
1616 			    (newrule->rt > PF_FASTROUTE)) &&
1617 			    !pcr->anchor[0])) &&
1618 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1619 				error = EINVAL;
1620 
1621 			if (error) {
1622 				pf_rm_rule(NULL, newrule);
1623 				break;
1624 			}
1625 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1626 			newrule->evaluations = newrule->packets = 0;
1627 			newrule->bytes = 0;
1628 		}
1629 		pf_empty_pool(&pf_pabuf);
1630 
1631 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1632 			oldrule = TAILQ_FIRST(
1633 			    ruleset->rules[rs_num].active.ptr);
1634 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1635 			oldrule = TAILQ_LAST(
1636 			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1637 		else {
1638 			oldrule = TAILQ_FIRST(
1639 			    ruleset->rules[rs_num].active.ptr);
1640 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1641 				oldrule = TAILQ_NEXT(oldrule, entries);
1642 			if (oldrule == NULL) {
1643 				if (newrule != NULL)
1644 					pf_rm_rule(NULL, newrule);
1645 				error = EINVAL;
1646 				break;
1647 			}
1648 		}
1649 
1650 		if (pcr->action == PF_CHANGE_REMOVE)
1651 			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1652 		else {
1653 			if (oldrule == NULL)
1654 				TAILQ_INSERT_TAIL(
1655 				    ruleset->rules[rs_num].active.ptr,
1656 				    newrule, entries);
1657 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1658 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1659 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1660 			else
1661 				TAILQ_INSERT_AFTER(
1662 				    ruleset->rules[rs_num].active.ptr,
1663 				    oldrule, newrule, entries);
1664 		}
1665 
1666 		nr = 0;
1667 		TAILQ_FOREACH(oldrule,
1668 		    ruleset->rules[rs_num].active.ptr, entries)
1669 			oldrule->nr = nr++;
1670 
1671 		ruleset->rules[rs_num].active.ticket++;
1672 
1673 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1674 		pf_remove_if_empty_ruleset(ruleset);
1675 
1676 		break;
1677 	}
1678 
1679 	case DIOCCLRSTATES: {
1680 		struct pf_state		*state;
1681 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1682 		int			 killed = 0;
1683 
1684 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1685 			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1686 			    state->u.s.kif->pfik_name)) {
1687 				state->timeout = PFTM_PURGE;
1688 #if NPFSYNC
1689 				/* don't send out individual delete messages */
1690 				state->sync_flags = PFSTATE_NOSYNC;
1691 #endif
1692 				killed++;
1693 			}
1694 		}
1695 		pf_purge_expired_states();
1696 		pf_status.states = 0;
1697 		psk->psk_af = killed;
1698 #if NPFSYNC
1699 		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1700 #endif
1701 		break;
1702 	}
1703 
1704 	case DIOCKILLSTATES: {
1705 		struct pf_state		*state;
1706 		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1707 		int			 killed = 0;
1708 
1709 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1710 			if ((!psk->psk_af || state->af == psk->psk_af)
1711 			    && (!psk->psk_proto || psk->psk_proto ==
1712 			    state->proto) &&
1713 			    PF_MATCHA(psk->psk_src.neg,
1714 			    &psk->psk_src.addr.v.a.addr,
1715 			    &psk->psk_src.addr.v.a.mask,
1716 			    &state->lan.addr, state->af) &&
1717 			    PF_MATCHA(psk->psk_dst.neg,
1718 			    &psk->psk_dst.addr.v.a.addr,
1719 			    &psk->psk_dst.addr.v.a.mask,
1720 			    &state->ext.addr, state->af) &&
1721 			    (psk->psk_src.port_op == 0 ||
1722 			    pf_match_port(psk->psk_src.port_op,
1723 			    psk->psk_src.port[0], psk->psk_src.port[1],
1724 			    state->lan.port)) &&
1725 			    (psk->psk_dst.port_op == 0 ||
1726 			    pf_match_port(psk->psk_dst.port_op,
1727 			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1728 			    state->ext.port)) &&
1729 			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1730 			    state->u.s.kif->pfik_name))) {
1731 				state->timeout = PFTM_PURGE;
1732 				killed++;
1733 			}
1734 		}
1735 		pf_purge_expired_states();
1736 		psk->psk_af = killed;
1737 		break;
1738 	}
1739 
1740 	case DIOCADDSTATE: {
1741 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1742 		struct pf_state		*state;
1743 		struct pfi_kif		*kif;
1744 
1745 		if (ps->state.timeout >= PFTM_MAX &&
1746 		    ps->state.timeout != PFTM_UNTIL_PACKET) {
1747 			error = EINVAL;
1748 			break;
1749 		}
1750 		state = pool_get(&pf_state_pl, PR_NOWAIT);
1751 		if (state == NULL) {
1752 			error = ENOMEM;
1753 			break;
1754 		}
1755 		kif = pfi_lookup_create(ps->state.u.ifname);
1756 		if (kif == NULL) {
1757 			pool_put(&pf_state_pl, state);
1758 			error = ENOENT;
1759 			break;
1760 		}
1761 		bcopy(&ps->state, state, sizeof(struct pf_state));
1762 		bzero(&state->u, sizeof(state->u));
1763 		state->rule.ptr = &pf_default_rule;
1764 		state->nat_rule.ptr = NULL;
1765 		state->anchor.ptr = NULL;
1766 		state->rt_kif = NULL;
1767 		state->creation = time_second;
1768 		state->pfsync_time = 0;
1769 		state->packets[0] = state->packets[1] = 0;
1770 		state->bytes[0] = state->bytes[1] = 0;
1771 
1772 		if (pf_insert_state(kif, state)) {
1773 			pfi_maybe_destroy(kif);
1774 			pool_put(&pf_state_pl, state);
1775 			error = ENOMEM;
1776 		}
1777 		break;
1778 	}
1779 
1780 	case DIOCGETSTATE: {
1781 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1782 		struct pf_state		*state;
1783 		u_int32_t		 nr;
1784 
1785 		nr = 0;
1786 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1787 			if (nr >= ps->nr)
1788 				break;
1789 			nr++;
1790 		}
1791 		if (state == NULL) {
1792 			error = EBUSY;
1793 			break;
1794 		}
1795 		bcopy(state, &ps->state, sizeof(struct pf_state));
1796 		ps->state.rule.nr = state->rule.ptr->nr;
1797 		ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1798 		    -1 : state->nat_rule.ptr->nr;
1799 		ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1800 		    -1 : state->anchor.ptr->nr;
1801 		ps->state.expire = pf_state_expires(state);
1802 		if (ps->state.expire > time_second)
1803 			ps->state.expire -= time_second;
1804 		else
1805 			ps->state.expire = 0;
1806 		break;
1807 	}
1808 
1809 	case DIOCGETSTATES: {
1810 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
1811 		struct pf_state		*state;
1812 		struct pf_state		*p, pstore;
1813 		struct pfi_kif		*kif;
1814 		u_int32_t		 nr = 0;
1815 		int			 space = ps->ps_len;
1816 
1817 		if (space == 0) {
1818 			TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1819 				nr += kif->pfik_states;
1820 			ps->ps_len = sizeof(struct pf_state) * nr;
1821 			break;
1822 		}
1823 
1824 		p = ps->ps_states;
1825 		TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1826 			RB_FOREACH(state, pf_state_tree_ext_gwy,
1827 			    &kif->pfik_ext_gwy) {
1828 				int	secs = time_second;
1829 
1830 				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1831 					break;
1832 
1833 				bcopy(state, &pstore, sizeof(pstore));
1834 				strlcpy(pstore.u.ifname, kif->pfik_name,
1835 				    sizeof(pstore.u.ifname));
1836 				pstore.rule.nr = state->rule.ptr->nr;
1837 				pstore.nat_rule.nr = (state->nat_rule.ptr ==
1838 				    NULL) ? -1 : state->nat_rule.ptr->nr;
1839 				pstore.anchor.nr = (state->anchor.ptr ==
1840 				    NULL) ? -1 : state->anchor.ptr->nr;
1841 				pstore.creation = secs - pstore.creation;
1842 				pstore.expire = pf_state_expires(state);
1843 				if (pstore.expire > secs)
1844 					pstore.expire -= secs;
1845 				else
1846 					pstore.expire = 0;
1847 				error = copyout(&pstore, p, sizeof(*p));
1848 				if (error)
1849 					goto fail;
1850 				p++;
1851 				nr++;
1852 			}
1853 		ps->ps_len = sizeof(struct pf_state) * nr;
1854 		break;
1855 	}
1856 
1857 	case DIOCGETSTATUS: {
1858 		struct pf_status *s = (struct pf_status *)addr;
1859 		bcopy(&pf_status, s, sizeof(struct pf_status));
1860 		pfi_fill_oldstatus(s);
1861 		break;
1862 	}
1863 
1864 	case DIOCSETSTATUSIF: {
1865 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
1866 
1867 		if (pi->ifname[0] == 0) {
1868 			bzero(pf_status.ifname, IFNAMSIZ);
1869 			break;
1870 		}
1871 		if (ifunit(pi->ifname) == NULL) {
1872 			error = EINVAL;
1873 			break;
1874 		}
1875 		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1876 		break;
1877 	}
1878 
1879 	case DIOCCLRSTATUS: {
1880 		bzero(pf_status.counters, sizeof(pf_status.counters));
1881 		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1882 		bzero(pf_status.scounters, sizeof(pf_status.scounters));
1883 		if (*pf_status.ifname)
1884 			pfi_clr_istats(pf_status.ifname, NULL,
1885 			    PFI_FLAG_INSTANCE);
1886 		break;
1887 	}
1888 
1889 	case DIOCNATLOOK: {
1890 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
1891 		struct pf_state		*state;
1892 		struct pf_state		 key;
1893 		int			 m = 0, direction = pnl->direction;
1894 
1895 		key.af = pnl->af;
1896 		key.proto = pnl->proto;
1897 
1898 		if (!pnl->proto ||
1899 		    PF_AZERO(&pnl->saddr, pnl->af) ||
1900 		    PF_AZERO(&pnl->daddr, pnl->af) ||
1901 		    !pnl->dport || !pnl->sport)
1902 			error = EINVAL;
1903 		else {
1904 			/*
1905 			 * Userland gives us the source and destination of the
1906 			 * connection; reverse the lookup so that we ask what
1907 			 * happens to the return traffic, which lets us find
1908 			 * the matching entry in the state tree.
1909 			 */
1910 			if (direction == PF_IN) {
1911 				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1912 				key.ext.port = pnl->dport;
1913 				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1914 				key.gwy.port = pnl->sport;
1915 				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1916 			} else {
1917 				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1918 				key.lan.port = pnl->dport;
1919 				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1920 				key.ext.port = pnl->sport;
1921 				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1922 			}
1923 			if (m > 1)
1924 				error = E2BIG;	/* more than one state */
1925 			else if (state != NULL) {
1926 				if (direction == PF_IN) {
1927 					PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1928 					    state->af);
1929 					pnl->rsport = state->lan.port;
1930 					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1931 					    pnl->af);
1932 					pnl->rdport = pnl->dport;
1933 				} else {
1934 					PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1935 					    state->af);
1936 					pnl->rdport = state->gwy.port;
1937 					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1938 					    pnl->af);
1939 					pnl->rsport = pnl->sport;
1940 				}
1941 			} else
1942 				error = ENOENT;
1943 		}
1944 		break;
1945 	}
1946 
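	/* set a global state timeout; the previous value is passed back */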
1947 	case DIOCSETTIMEOUT: {
1948 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1949 		int		 old;
1950 
1951 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1952 		    pt->seconds < 0) {
1953 			error = EINVAL;
1954 			goto fail;
1955 		}
1956 		old = pf_default_rule.timeout[pt->timeout];
1957 		pf_default_rule.timeout[pt->timeout] = pt->seconds;
1958 		pt->seconds = old;
1959 		break;
1960 	}
1961 
1962 	case DIOCGETTIMEOUT: {
1963 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1964 
1965 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1966 			error = EINVAL;
1967 			goto fail;
1968 		}
1969 		pt->seconds = pf_default_rule.timeout[pt->timeout];
1970 		break;
1971 	}
1972 
1973 	case DIOCGETLIMIT: {
1974 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1975 
1976 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1977 			error = EINVAL;
1978 			goto fail;
1979 		}
1980 		pl->limit = pf_pool_limits[pl->index].limit;
1981 		break;
1982 	}
1983 
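	/*
	 * set the hard limit on one of the pf memory pools
	 * (states, source nodes, ...); the old limit is passed back
	 */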
1984 	case DIOCSETLIMIT: {
1985 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1986 		int			 old_limit;
1987 
1988 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1989 		    pf_pool_limits[pl->index].pp == NULL) {
1990 			error = EINVAL;
1991 			goto fail;
1992 		}
1993 #ifdef __OpenBSD__
1994 		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1995 		    pl->limit, NULL, 0) != 0) {
1996 			error = EBUSY;
1997 			goto fail;
1998 		}
1999 #else
2000 		pool_sethardlimit(pf_pool_limits[pl->index].pp,
2001 		    pl->limit, NULL, 0);
2002 #endif
2003 		old_limit = pf_pool_limits[pl->index].limit;
2004 		pf_pool_limits[pl->index].limit = pl->limit;
2005 		pl->limit = old_limit;
2006 		break;
2007 	}
2008 
2009 	case DIOCSETDEBUG: {
2010 		u_int32_t	*level = (u_int32_t *)addr;
2011 
2012 		pf_status.debug = *level;
2013 		break;
2014 	}
2015 
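	/* zero the counters of all active filter rules */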
2016 	case DIOCCLRRULECTRS: {
2017 		struct pf_ruleset	*ruleset = &pf_main_ruleset;
2018 		struct pf_rule		*rule;
2019 
2020 		TAILQ_FOREACH(rule,
2021 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2022 			rule->evaluations = rule->packets =
2023 			    rule->bytes = 0;
2024 		break;
2025 	}
2026 
2027 #ifdef ALTQ
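	/*
	 * start/stop queueing: walk the active altq list and enable
	 * or disable the discipline on each interface entry
	 */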
2028 	case DIOCSTARTALTQ: {
2029 		struct pf_altq		*altq;
2030 
2031 		/* enable all altq interfaces on active list */
2032 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2033 			if (altq->qname[0] == 0) {
2034 				error = pf_enable_altq(altq);
2035 				if (error != 0)
2036 					break;
2037 			}
2038 		}
2039 		if (error == 0)
2040 			pf_altq_running = 1;
2041 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2042 		break;
2043 	}
2044 
2045 	case DIOCSTOPALTQ: {
2046 		struct pf_altq		*altq;
2047 
2048 		/* disable all altq interfaces on active list */
2049 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2050 			if (altq->qname[0] == 0) {
2051 				error = pf_disable_altq(altq);
2052 				if (error != 0)
2053 					break;
2054 			}
2055 		}
2056 		if (error == 0)
2057 			pf_altq_running = 0;
2058 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2059 		break;
2060 	}
2061 
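	/*
	 * append a queue or interface definition to the inactive
	 * altq list; the caller must hold the inactive ticket
	 */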
2062 	case DIOCADDALTQ: {
2063 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2064 		struct pf_altq		*altq, *a;
2065 
2066 		if (pa->ticket != ticket_altqs_inactive) {
2067 			error = EBUSY;
2068 			break;
2069 		}
2070 		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2071 		if (altq == NULL) {
2072 			error = ENOMEM;
2073 			break;
2074 		}
2075 		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2076 
2077 		/*
2078 		 * if this is for a queue, find the discipline and
2079 		 * copy the necessary fields
2080 		 */
2081 		if (altq->qname[0] != 0) {
2082 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2083 				error = EBUSY;
2084 				pool_put(&pf_altq_pl, altq);
2085 				break;
2086 			}
2087 			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2088 				if (strncmp(a->ifname, altq->ifname,
2089 				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
2090 					altq->altq_disc = a->altq_disc;
2091 					break;
2092 				}
2093 			}
2094 		}
2095 
2096 		error = altq_add(altq);
2097 		if (error) {
2098 			pool_put(&pf_altq_pl, altq);
2099 			break;
2100 		}
2101 
2102 		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2103 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2104 		break;
2105 	}
2106 
2107 	case DIOCGETALTQS: {
2108 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2109 		struct pf_altq		*altq;
2110 
2111 		pa->nr = 0;
2112 		TAILQ_FOREACH(altq, pf_altqs_active, entries)
2113 			pa->nr++;
2114 		pa->ticket = ticket_altqs_active;
2115 		break;
2116 	}
2117 
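	/* copy out the pa->nr'th entry of the active altq list */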
2118 	case DIOCGETALTQ: {
2119 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2120 		struct pf_altq		*altq;
2121 		u_int32_t		 nr;
2122 
2123 		if (pa->ticket != ticket_altqs_active) {
2124 			error = EBUSY;
2125 			break;
2126 		}
2127 		nr = 0;
2128 		altq = TAILQ_FIRST(pf_altqs_active);
2129 		while ((altq != NULL) && (nr < pa->nr)) {
2130 			altq = TAILQ_NEXT(altq, entries);
2131 			nr++;
2132 		}
2133 		if (altq == NULL) {
2134 			error = EBUSY;
2135 			break;
2136 		}
2137 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2138 		break;
2139 	}
2140 
2141 	case DIOCCHANGEALTQ:
2142 		/* CHANGEALTQ not supported yet! */
2143 		error = ENODEV;
2144 		break;
2145 
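	/* fetch the scheduler statistics of the pq->nr'th active queue */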
2146 	case DIOCGETQSTATS: {
2147 		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
2148 		struct pf_altq		*altq;
2149 		u_int32_t		 nr;
2150 		int			 nbytes;
2151 
2152 		if (pq->ticket != ticket_altqs_active) {
2153 			error = EBUSY;
2154 			break;
2155 		}
2156 		nbytes = pq->nbytes;
2157 		nr = 0;
2158 		altq = TAILQ_FIRST(pf_altqs_active);
2159 		while ((altq != NULL) && (nr < pq->nr)) {
2160 			altq = TAILQ_NEXT(altq, entries);
2161 			nr++;
2162 		}
2163 		if (altq == NULL) {
2164 			error = EBUSY;
2165 			break;
2166 		}
2167 		error = altq_getqstats(altq, pq->buf, &nbytes);
2168 		if (error == 0) {
2169 			pq->scheduler = altq->scheduler;
2170 			pq->nbytes = nbytes;
2171 		}
2172 		break;
2173 	}
2174 #endif /* ALTQ */
2175 
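	/*
	 * start building a new address pool: flush the staging
	 * buffer and hand out a fresh ticket
	 */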
2176 	case DIOCBEGINADDRS: {
2177 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2178 
2179 		pf_empty_pool(&pf_pabuf);
2180 		pp->ticket = ++ticket_pabuf;
2181 		break;
2182 	}
2183 
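	/* validate and append one pool address to the staging buffer */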
2184 	case DIOCADDADDR: {
2185 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2186 
2187 #ifndef INET
2188 		if (pp->af == AF_INET) {
2189 			error = EAFNOSUPPORT;
2190 			break;
2191 		}
2192 #endif /* INET */
2193 #ifndef INET6
2194 		if (pp->af == AF_INET6) {
2195 			error = EAFNOSUPPORT;
2196 			break;
2197 		}
2198 #endif /* INET6 */
2199 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2200 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2201 		    pp->addr.addr.type != PF_ADDR_TABLE) {
2202 			error = EINVAL;
2203 			break;
2204 		}
2205 		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2206 		if (pa == NULL) {
2207 			error = ENOMEM;
2208 			break;
2209 		}
2210 		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2211 		if (pa->ifname[0]) {
2212 			pa->kif = pfi_attach_rule(pa->ifname);
2213 			if (pa->kif == NULL) {
2214 				pool_put(&pf_pooladdr_pl, pa);
2215 				error = EINVAL;
2216 				break;
2217 			}
2218 		}
2219 		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2220 			pfi_dynaddr_remove(&pa->addr);
2221 			pfi_detach_rule(pa->kif);
2222 			pool_put(&pf_pooladdr_pl, pa);
2223 			error = EINVAL;
2224 			break;
2225 		}
2226 		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2227 		break;
2228 	}
2229 
2230 	case DIOCGETADDRS: {
2231 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2232 
2233 		pp->nr = 0;
2234 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2235 		    pp->r_num, 0, 1, 0);
2236 		if (pool == NULL) {
2237 			error = EBUSY;
2238 			break;
2239 		}
2240 		TAILQ_FOREACH(pa, &pool->list, entries)
2241 			pp->nr++;
2242 		break;
2243 	}
2244 
2245 	case DIOCGETADDR: {
2246 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2247 		u_int32_t		 nr = 0;
2248 
2249 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2250 		    pp->r_num, 0, 1, 1);
2251 		if (pool == NULL) {
2252 			error = EBUSY;
2253 			break;
2254 		}
2255 		pa = TAILQ_FIRST(&pool->list);
2256 		while ((pa != NULL) && (nr < pp->nr)) {
2257 			pa = TAILQ_NEXT(pa, entries);
2258 			nr++;
2259 		}
2260 		if (pa == NULL) {
2261 			error = EBUSY;
2262 			break;
2263 		}
2264 		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2265 		pfi_dynaddr_copyout(&pp->addr.addr);
2266 		pf_tbladdr_copyout(&pp->addr.addr);
2267 		pf_rtlabel_copyout(&pp->addr.addr);
2268 		break;
2269 	}
2270 
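	/*
	 * add, remove or replace a single entry of an existing
	 * rule's address pool without reloading the ruleset
	 */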
2271 	case DIOCCHANGEADDR: {
2272 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
2273 		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
2274 		struct pf_ruleset	*ruleset;
2275 
2276 		if (pca->action < PF_CHANGE_ADD_HEAD ||
2277 		    pca->action > PF_CHANGE_REMOVE) {
2278 			error = EINVAL;
2279 			break;
2280 		}
2281 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2282 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2283 		    pca->addr.addr.type != PF_ADDR_TABLE) {
2284 			error = EINVAL;
2285 			break;
2286 		}
2287 
2288 		ruleset = pf_find_ruleset(pca->anchor);
2289 		if (ruleset == NULL) {
2290 			error = EBUSY;
2291 			break;
2292 		}
2293 		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2294 		    pca->r_num, pca->r_last, 1, 1);
2295 		if (pool == NULL) {
2296 			error = EBUSY;
2297 			break;
2298 		}
2299 		if (pca->action != PF_CHANGE_REMOVE) {
2300 			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2301 			if (newpa == NULL) {
2302 				error = ENOMEM;
2303 				break;
2304 			}
2305 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2306 #ifndef INET
2307 			if (pca->af == AF_INET) {
2308 				pool_put(&pf_pooladdr_pl, newpa);
2309 				error = EAFNOSUPPORT;
2310 				break;
2311 			}
2312 #endif /* INET */
2313 #ifndef INET6
2314 			if (pca->af == AF_INET6) {
2315 				pool_put(&pf_pooladdr_pl, newpa);
2316 				error = EAFNOSUPPORT;
2317 				break;
2318 			}
2319 #endif /* INET6 */
2320 			if (newpa->ifname[0]) {
2321 				newpa->kif = pfi_attach_rule(newpa->ifname);
2322 				if (newpa->kif == NULL) {
2323 					pool_put(&pf_pooladdr_pl, newpa);
2324 					error = EINVAL;
2325 					break;
2326 				}
2327 			} else
2328 				newpa->kif = NULL;
2329 			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2330 			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
2331 				pfi_dynaddr_remove(&newpa->addr);
2332 				pfi_detach_rule(newpa->kif);
2333 				pool_put(&pf_pooladdr_pl, newpa);
2334 				error = EINVAL;
2335 				break;
2336 			}
2337 		}
2338 
2339 		if (pca->action == PF_CHANGE_ADD_HEAD)
2340 			oldpa = TAILQ_FIRST(&pool->list);
2341 		else if (pca->action == PF_CHANGE_ADD_TAIL)
2342 			oldpa = TAILQ_LAST(&pool->list, pf_palist);
2343 		else {
2344 			int	i = 0;
2345 
2346 			oldpa = TAILQ_FIRST(&pool->list);
2347 			while ((oldpa != NULL) && (i < pca->nr)) {
2348 				oldpa = TAILQ_NEXT(oldpa, entries);
2349 				i++;
2350 			}
2351 			if (oldpa == NULL) {
2352 				error = EINVAL;
2353 				break;
2354 			}
2355 		}
2356 
2357 		if (pca->action == PF_CHANGE_REMOVE) {
2358 			TAILQ_REMOVE(&pool->list, oldpa, entries);
2359 			pfi_dynaddr_remove(&oldpa->addr);
2360 			pf_tbladdr_remove(&oldpa->addr);
2361 			pfi_detach_rule(oldpa->kif);
2362 			pool_put(&pf_pooladdr_pl, oldpa);
2363 		} else {
2364 			if (oldpa == NULL)
2365 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2366 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
2367 			    pca->action == PF_CHANGE_ADD_BEFORE)
2368 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2369 			else
2370 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
2371 				    newpa, entries);
2372 		}
2373 
2374 		pool->cur = TAILQ_FIRST(&pool->list);
2375 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2376 		    pca->af);
2377 		break;
2378 	}
2379 
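	/* count the child anchors under the given anchor path */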
2380 	case DIOCGETRULESETS: {
2381 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2382 		struct pf_ruleset	*ruleset;
2383 		struct pf_anchor	*anchor;
2384 
2385 		pr->path[sizeof(pr->path) - 1] = 0;
2386 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2387 			error = EINVAL;
2388 			break;
2389 		}
2390 		pr->nr = 0;
2391 		if (ruleset->anchor == NULL) {
2392 			/* XXX kludge for pf_main_ruleset */
2393 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2394 				if (anchor->parent == NULL)
2395 					pr->nr++;
2396 		} else {
2397 			RB_FOREACH(anchor, pf_anchor_node,
2398 			    &ruleset->anchor->children)
2399 				pr->nr++;
2400 		}
2401 		break;
2402 	}
2403 
2404 	case DIOCGETRULESET: {
2405 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2406 		struct pf_ruleset	*ruleset;
2407 		struct pf_anchor	*anchor;
2408 		u_int32_t		 nr = 0;
2409 
2410 		pr->path[sizeof(pr->path) - 1] = 0;
2411 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2412 			error = EINVAL;
2413 			break;
2414 		}
2415 		pr->name[0] = 0;
2416 		if (ruleset->anchor == NULL) {
2417 			/* XXX kludge for pf_main_ruleset */
2418 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2419 				if (anchor->parent == NULL && nr++ == pr->nr) {
2420 					strlcpy(pr->name, anchor->name,
2421 					    sizeof(pr->name));
2422 					break;
2423 				}
2424 		} else {
2425 			RB_FOREACH(anchor, pf_anchor_node,
2426 			    &ruleset->anchor->children)
2427 				if (nr++ == pr->nr) {
2428 					strlcpy(pr->name, anchor->name,
2429 					    sizeof(pr->name));
2430 					break;
2431 				}
2432 		}
2433 		if (!pr->name[0])
2434 			error = EBUSY;
2435 		break;
2436 	}
2437 
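	/*
	 * the DIOCR* ioctls below operate on tables; pfrio_esize is
	 * checked against the expected element size before the
	 * request is passed on to the pfr_*() routines
	 */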
2438 	case DIOCRCLRTABLES: {
2439 		struct pfioc_table *io = (struct pfioc_table *)addr;
2440 
2441 		if (io->pfrio_esize != 0) {
2442 			error = ENODEV;
2443 			break;
2444 		}
2445 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2446 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2447 		break;
2448 	}
2449 
2450 	case DIOCRADDTABLES: {
2451 		struct pfioc_table *io = (struct pfioc_table *)addr;
2452 
2453 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2454 			error = ENODEV;
2455 			break;
2456 		}
2457 		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2458 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2459 		break;
2460 	}
2461 
2462 	case DIOCRDELTABLES: {
2463 		struct pfioc_table *io = (struct pfioc_table *)addr;
2464 
2465 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2466 			error = ENODEV;
2467 			break;
2468 		}
2469 		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2470 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2471 		break;
2472 	}
2473 
2474 	case DIOCRGETTABLES: {
2475 		struct pfioc_table *io = (struct pfioc_table *)addr;
2476 
2477 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2478 			error = ENODEV;
2479 			break;
2480 		}
2481 		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2482 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2483 		break;
2484 	}
2485 
2486 	case DIOCRGETTSTATS: {
2487 		struct pfioc_table *io = (struct pfioc_table *)addr;
2488 
2489 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2490 			error = ENODEV;
2491 			break;
2492 		}
2493 		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2494 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2495 		break;
2496 	}
2497 
2498 	case DIOCRCLRTSTATS: {
2499 		struct pfioc_table *io = (struct pfioc_table *)addr;
2500 
2501 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2502 			error = ENODEV;
2503 			break;
2504 		}
2505 		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2506 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2507 		break;
2508 	}
2509 
2510 	case DIOCRSETTFLAGS: {
2511 		struct pfioc_table *io = (struct pfioc_table *)addr;
2512 
2513 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2514 			error = ENODEV;
2515 			break;
2516 		}
2517 		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2518 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2519 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2520 		break;
2521 	}
2522 
2523 	case DIOCRCLRADDRS: {
2524 		struct pfioc_table *io = (struct pfioc_table *)addr;
2525 
2526 		if (io->pfrio_esize != 0) {
2527 			error = ENODEV;
2528 			break;
2529 		}
2530 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2531 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2532 		break;
2533 	}
2534 
2535 	case DIOCRADDADDRS: {
2536 		struct pfioc_table *io = (struct pfioc_table *)addr;
2537 
2538 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2539 			error = ENODEV;
2540 			break;
2541 		}
2542 		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2543 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2544 		    PFR_FLAG_USERIOCTL);
2545 		break;
2546 	}
2547 
2548 	case DIOCRDELADDRS: {
2549 		struct pfioc_table *io = (struct pfioc_table *)addr;
2550 
2551 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2552 			error = ENODEV;
2553 			break;
2554 		}
2555 		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2556 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2557 		    PFR_FLAG_USERIOCTL);
2558 		break;
2559 	}
2560 
2561 	case DIOCRSETADDRS: {
2562 		struct pfioc_table *io = (struct pfioc_table *)addr;
2563 
2564 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2565 			error = ENODEV;
2566 			break;
2567 		}
2568 		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2569 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2570 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2571 		    PFR_FLAG_USERIOCTL);
2572 		break;
2573 	}
2574 
2575 	case DIOCRGETADDRS: {
2576 		struct pfioc_table *io = (struct pfioc_table *)addr;
2577 
2578 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2579 			error = ENODEV;
2580 			break;
2581 		}
2582 		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2583 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2584 		break;
2585 	}
2586 
2587 	case DIOCRGETASTATS: {
2588 		struct pfioc_table *io = (struct pfioc_table *)addr;
2589 
2590 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2591 			error = ENODEV;
2592 			break;
2593 		}
2594 		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2595 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2596 		break;
2597 	}
2598 
2599 	case DIOCRCLRASTATS: {
2600 		struct pfioc_table *io = (struct pfioc_table *)addr;
2601 
2602 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2603 			error = ENODEV;
2604 			break;
2605 		}
2606 		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2607 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2608 		    PFR_FLAG_USERIOCTL);
2609 		break;
2610 	}
2611 
2612 	case DIOCRTSTADDRS: {
2613 		struct pfioc_table *io = (struct pfioc_table *)addr;
2614 
2615 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2616 			error = ENODEV;
2617 			break;
2618 		}
2619 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2620 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2621 		    PFR_FLAG_USERIOCTL);
2622 		break;
2623 	}
2624 
2625 	case DIOCRINADEFINE: {
2626 		struct pfioc_table *io = (struct pfioc_table *)addr;
2627 
2628 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2629 			error = ENODEV;
2630 			break;
2631 		}
2632 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2633 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2634 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2635 		break;
2636 	}
2637 
2638 	case DIOCOSFPADD: {
2639 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2640 		error = pf_osfp_add(io);
2641 		break;
2642 	}
2643 
2644 	case DIOCOSFPGET: {
2645 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2646 		error = pf_osfp_get(io);
2647 		break;
2648 	}
2649 
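	/*
	 * open an inactive copy of every ruleset named in the
	 * transaction array and pass a ticket back for each
	 */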
2650 	case DIOCXBEGIN: {
2651 		struct pfioc_trans		*io = (struct pfioc_trans *)
2652 						    addr;
2653 		static struct pfioc_trans_e	 ioe;
2654 		static struct pfr_table		 table;
2655 		int				 i;
2656 
2657 		if (io->esize != sizeof(ioe)) {
2658 			error = ENODEV;
2659 			goto fail;
2660 		}
2661 		for (i = 0; i < io->size; i++) {
2662 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2663 				error = EFAULT;
2664 				goto fail;
2665 			}
2666 			switch (ioe.rs_num) {
2667 #ifdef ALTQ
2668 			case PF_RULESET_ALTQ:
2669 				if (ioe.anchor[0]) {
2670 					error = EINVAL;
2671 					goto fail;
2672 				}
2673 				if ((error = pf_begin_altq(&ioe.ticket)))
2674 					goto fail;
2675 				break;
2676 #endif /* ALTQ */
2677 			case PF_RULESET_TABLE:
2678 				bzero(&table, sizeof(table));
2679 				strlcpy(table.pfrt_anchor, ioe.anchor,
2680 				    sizeof(table.pfrt_anchor));
2681 				if ((error = pfr_ina_begin(&table,
2682 				    &ioe.ticket, NULL, 0)))
2683 					goto fail;
2684 				break;
2685 			default:
2686 				if ((error = pf_begin_rules(&ioe.ticket,
2687 				    ioe.rs_num, ioe.anchor)))
2688 					goto fail;
2689 				break;
2690 			}
2691 			if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2692 				error = EFAULT;
2693 				goto fail;
2694 			}
2695 		}
2696 		break;
2697 	}
2698 
2699 	case DIOCXROLLBACK: {
2700 		struct pfioc_trans		*io = (struct pfioc_trans *)
2701 						    addr;
2702 		static struct pfioc_trans_e	 ioe;
2703 		static struct pfr_table		 table;
2704 		int				 i;
2705 
2706 		if (io->esize != sizeof(ioe)) {
2707 			error = ENODEV;
2708 			goto fail;
2709 		}
2710 		for (i = 0; i < io->size; i++) {
2711 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2712 				error = EFAULT;
2713 				goto fail;
2714 			}
2715 			switch (ioe.rs_num) {
2716 #ifdef ALTQ
2717 			case PF_RULESET_ALTQ:
2718 				if (ioe.anchor[0]) {
2719 					error = EINVAL;
2720 					goto fail;
2721 				}
2722 				if ((error = pf_rollback_altq(ioe.ticket)))
2723 					goto fail; /* really bad */
2724 				break;
2725 #endif /* ALTQ */
2726 			case PF_RULESET_TABLE:
2727 				bzero(&table, sizeof(table));
2728 				strlcpy(table.pfrt_anchor, ioe.anchor,
2729 				    sizeof(table.pfrt_anchor));
2730 				if ((error = pfr_ina_rollback(&table,
2731 				    ioe.ticket, NULL, 0)))
2732 					goto fail; /* really bad */
2733 				break;
2734 			default:
2735 				if ((error = pf_rollback_rules(ioe.ticket,
2736 				    ioe.rs_num, ioe.anchor)))
2737 					goto fail; /* really bad */
2738 				break;
2739 			}
2740 		}
2741 		break;
2742 	}
2743 
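	/*
	 * commit a transaction: all tickets are verified in a first
	 * pass so that the actual commit below should not fail
	 */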
2744 	case DIOCXCOMMIT: {
2745 		struct pfioc_trans		*io = (struct pfioc_trans *)
2746 						    addr;
2747 		static struct pfioc_trans_e	 ioe;
2748 		static struct pfr_table		 table;
2749 		struct pf_ruleset		*rs;
2750 		int				 i;
2751 
2752 		if (io->esize != sizeof(ioe)) {
2753 			error = ENODEV;
2754 			goto fail;
2755 		}
2756 		/* first pass: make sure everything will succeed */
2757 		for (i = 0; i < io->size; i++) {
2758 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2759 				error = EFAULT;
2760 				goto fail;
2761 			}
2762 			switch (ioe.rs_num) {
2763 #ifdef ALTQ
2764 			case PF_RULESET_ALTQ:
2765 				if (ioe.anchor[0]) {
2766 					error = EINVAL;
2767 					goto fail;
2768 				}
2769 				if (!altqs_inactive_open || ioe.ticket !=
2770 				    ticket_altqs_inactive) {
2771 					error = EBUSY;
2772 					goto fail;
2773 				}
2774 				break;
2775 #endif /* ALTQ */
2776 			case PF_RULESET_TABLE:
2777 				rs = pf_find_ruleset(ioe.anchor);
2778 				if (rs == NULL || !rs->topen || ioe.ticket !=
2779 				     rs->tticket) {
2780 					error = EBUSY;
2781 					goto fail;
2782 				}
2783 				break;
2784 			default:
2785 				if (ioe.rs_num < 0 || ioe.rs_num >=
2786 				    PF_RULESET_MAX) {
2787 					error = EINVAL;
2788 					goto fail;
2789 				}
2790 				rs = pf_find_ruleset(ioe.anchor);
2791 				if (rs == NULL ||
2792 				    !rs->rules[ioe.rs_num].inactive.open ||
2793 				    rs->rules[ioe.rs_num].inactive.ticket !=
2794 				    ioe.ticket) {
2795 					error = EBUSY;
2796 					goto fail;
2797 				}
2798 				break;
2799 			}
2800 		}
2801 		/* now do the commit - no errors should happen here */
2802 		for (i = 0; i < io->size; i++) {
2803 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2804 				error = EFAULT;
2805 				goto fail;
2806 			}
2807 			switch (ioe.rs_num) {
2808 #ifdef ALTQ
2809 			case PF_RULESET_ALTQ:
2810 				if ((error = pf_commit_altq(ioe.ticket)))
2811 					goto fail; /* really bad */
2812 				break;
2813 #endif /* ALTQ */
2814 			case PF_RULESET_TABLE:
2815 				bzero(&table, sizeof(table));
2816 				strlcpy(table.pfrt_anchor, ioe.anchor,
2817 				    sizeof(table.pfrt_anchor));
2818 				if ((error = pfr_ina_commit(&table, ioe.ticket,
2819 				    NULL, NULL, 0)))
2820 					goto fail; /* really bad */
2821 				break;
2822 			default:
2823 				if ((error = pf_commit_rules(ioe.ticket,
2824 				    ioe.rs_num, ioe.anchor)))
2825 					goto fail; /* really bad */
2826 				break;
2827 			}
2828 		}
2829 		break;
2830 	}
2831 
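	/*
	 * copy the source tracking nodes out to userland; with
	 * psn_len == 0 only the required buffer size is reported
	 */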
2832 	case DIOCGETSRCNODES: {
2833 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2834 		struct pf_src_node	*n;
2835 		struct pf_src_node	*p, pstore;
2836 		u_int32_t		 nr = 0;
2837 		int			 space = psn->psn_len;
2838 
2839 		if (space == 0) {
2840 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2841 				nr++;
2842 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2843 			break;
2844 		}
2845 
2846 		p = psn->psn_src_nodes;
2847 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2848 			int	secs = time_second, diff;
2849 
2850 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2851 				break;
2852 
2853 			bcopy(n, &pstore, sizeof(pstore));
2854 			if (n->rule.ptr != NULL)
2855 				pstore.rule.nr = n->rule.ptr->nr;
2856 			pstore.creation = secs - pstore.creation;
2857 			if (pstore.expire > secs)
2858 				pstore.expire -= secs;
2859 			else
2860 				pstore.expire = 0;
2861 
2862 			/* adjust the connection rate estimate */
2863 			diff = secs - n->conn_rate.last;
2864 			if (diff >= n->conn_rate.seconds)
2865 				pstore.conn_rate.count = 0;
2866 			else
2867 				pstore.conn_rate.count -=
2868 				    n->conn_rate.count * diff /
2869 				    n->conn_rate.seconds;
2870 
2871 			error = copyout(&pstore, p, sizeof(*p));
2872 			if (error)
2873 				goto fail;
2874 			p++;
2875 			nr++;
2876 		}
2877 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2878 		break;
2879 	}
2880 
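	/*
	 * detach all states from their source nodes, mark every
	 * node as expired and purge them
	 */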
2881 	case DIOCCLRSRCNODES: {
2882 		struct pf_src_node	*n;
2883 		struct pf_state		*state;
2884 
2885 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2886 			state->src_node = NULL;
2887 			state->nat_src_node = NULL;
2888 		}
2889 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2890 			n->expire = 1;
2891 			n->states = 0;
2892 		}
2893 		pf_purge_expired_src_nodes();
2894 		pf_status.src_nodes = 0;
2895 		break;
2896 	}
2897 
2898 	case DIOCSETHOSTID: {
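	/* set the host id; a value of zero asks for a random one */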
2899 		u_int32_t	*hostid = (u_int32_t *)addr;
2900 
2901 		if (*hostid == 0)
2902 			pf_status.hostid = arc4random();
2903 		else
2904 			pf_status.hostid = *hostid;
2905 		break;
2906 	}
2907 
2908 	case DIOCOSFPFLUSH:
2909 		pf_osfp_flush();
2910 		break;
2911 
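	/* copy out the interfaces known to pf and their statistics */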
2912 	case DIOCIGETIFACES: {
2913 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2914 
2915 		if (io->pfiio_esize != sizeof(struct pfi_if)) {
2916 			error = ENODEV;
2917 			break;
2918 		}
2919 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2920 		    &io->pfiio_size, io->pfiio_flags);
2921 		break;
2922 	}
2923 
2924 	case DIOCICLRISTATS: {
2925 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2926 
2927 		error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2928 		    io->pfiio_flags);
2929 		break;
2930 	}
2931 
2932 	case DIOCSETIFFLAG: {
2933 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2934 
2935 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2936 		break;
2937 	}
2938 
2939 	case DIOCCLRIFFLAG: {
2940 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2941 
2942 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2943 		break;
2944 	}
2945 
2946 	default:
2947 		error = ENODEV;
2948 		break;
2949 	}
2950 fail:
2951 	splx(s);
2952 	return (error);
2953 }
2954 
2955 #ifdef __NetBSD__
2956 #ifdef INET
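/*
 * pfil(9) hook for IPv4: make the mbuf writable, flush delayed
 * checksums on outbound packets and run the packet through
 * pf_test(); if pf decides to drop it, the mbuf is freed here
 * and an error is returned.
 */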
2957 int
2958 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2959 {
2960 	int error;
2961 
2962 	/*
2963 	 * ensure that the mbufs are writable beforehand,
2964 	 * as the pf code assumes.
2965 	 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
2966 	 * XXX inefficient
2967 	 */
2968 	error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2969 	if (error) {
2970 		m_freem(*mp);
2971 		*mp = NULL;
2972 		return error;
2973 	}
2974 
2975 	/*
2976 	 * If the packet is out-bound, we can't delay checksums
2977 	 * here.  For in-bound, the checksum has already been
2978 	 * validated.
2979 	 */
2980 	if (dir == PFIL_OUT) {
2981 		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2982 			in_delayed_cksum(*mp);
2983 			(*mp)->m_pkthdr.csum_flags &=
2984 			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2985 		}
2986 	}
2987 
2988 	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2989 	    != PF_PASS) {
2990 		m_freem(*mp);
2991 		*mp = NULL;
2992 		return EHOSTUNREACH;
2993 	}
2994 
2995 	/*
2996 	 * we're not compatible with fast-forward.
2997 	 */
2998 
2999 	if (dir == PFIL_IN && *mp) {
3000 		(*mp)->m_flags &= ~M_CANFASTFWD;
3001 	}
3002 
3003 	return (0);
3004 }
3005 #endif /* INET */
3006 
3007 #ifdef INET6
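/*
 * pfil(9) hook for IPv6: same dance as pfil4_wrapper(), but with
 * the v6 checksum flags and pf_test6().
 */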
3008 int
3009 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3010 {
3011 	int error;
3012 
3013 	/*
3014 	 * ensure that the mbufs are writable beforehand,
3015 	 * as the pf code assumes.
3016 	 * XXX inefficient
3017 	 */
3018 	error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3019 	if (error) {
3020 		m_freem(*mp);
3021 		*mp = NULL;
3022 		return error;
3023 	}
3024 
3025 	/*
3026 	 * If the packet is out-bound, we can't delay checksums
3027 	 * here.  For in-bound, the checksum has already been
3028 	 * validated.
3029 	 */
3030 	if (dir == PFIL_OUT) {
3031 		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3032 			in6_delayed_cksum(*mp);
3033 			(*mp)->m_pkthdr.csum_flags &=
3034 			    ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3035 		}
3036 	}
3037 
3038 	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3039 	    != PF_PASS) {
3040 		m_freem(*mp);
3041 		*mp = NULL;
3042 		return EHOSTUNREACH;
3043 	} else
3044 		return (0);
3045 }
3046 #endif
3047 
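/*
 * interface attach/detach events from pfil(9); the command is
 * passed in place of the mbuf pointer, so recover it and update
 * pf's interface bookkeeping accordingly.
 */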
3048 int
3049 pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3050 {
3051 	u_long cmd = (u_long)mp;
3052 
3053 	switch (cmd) {
3054 	case PFIL_IFNET_ATTACH:
3055 		pfi_attach_ifnet(ifp);
3056 		break;
3057 	case PFIL_IFNET_DETACH:
3058 		pfi_detach_ifnet(ifp);
3059 		break;
3060 	}
3061 
3062 	return (0);
3063 }
3064 
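/*
 * interface address change events from pfil(9); again the command
 * arrives in place of the mbuf pointer.  refresh the addresses
 * tracked for the interface.
 */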
3065 int
3066 pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3067 {
3068 	extern void pfi_kifaddr_update_if(struct ifnet *);
3069 
3070 	u_long cmd = (u_long)mp;
3071 
3072 	switch (cmd) {
3073 	case SIOCSIFADDR:
3074 	case SIOCAIFADDR:
3075 	case SIOCDIFADDR:
3076 #ifdef INET6
3077 	case SIOCAIFADDR_IN6:
3078 	case SIOCDIFADDR_IN6:
3079 #endif
3080 		pfi_kifaddr_update_if(ifp);
3081 		break;
3082 	default:
3083 		panic("unexpected ioctl");
3084 	}
3085 
3086 	return (0);
3087 }
3088 
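/*
 * register all pfil(9) hooks and attach the interfaces that
 * already exist; on failure the hooks registered so far are
 * unwound in reverse order via the bad* labels.
 */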
3089 static int
3090 pf_pfil_attach(void)
3091 {
3092 	struct pfil_head *ph_inet;
3093 #ifdef INET6
3094 	struct pfil_head *ph_inet6;
3095 #endif
3096 	int error;
3097 	int i;
3098 
3099 	if (pf_pfil_attached)
3100 		return (0);
3101 
3102 	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3103 	if (error)
3104 		goto bad1;
3105 	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3106 	if (error)
3107 		goto bad2;
3108 
3109 	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3110 	if (ph_inet)
3111 		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
3112 		    PFIL_IN|PFIL_OUT, ph_inet);
3113 	else
3114 		error = ENOENT;
3115 	if (error)
3116 		goto bad3;
3117 
3118 #ifdef INET6
3119 	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3120 	if (ph_inet6)
3121 		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
3122 		    PFIL_IN|PFIL_OUT, ph_inet6);
3123 	else
3124 		error = ENOENT;
3125 	if (error)
3126 		goto bad4;
3127 #endif
3128 
3129 	for (i = 0; i < if_indexlim; i++)
3130 		if (ifindex2ifnet[i])
3131 			pfi_attach_ifnet(ifindex2ifnet[i]);
3132 	pf_pfil_attached = 1;
3133 
3134 	return (0);
3135 
3136 #ifdef INET6
3137 bad4:
3138 	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
3139 #endif
3140 bad3:
3141 	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3142 bad2:
3143 	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3144 bad1:
3145 	return (error);
3146 }
3147 
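/*
 * undo pf_pfil_attach(): detach the interfaces and remove all
 * previously registered hooks.
 */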
3148 static int
3149 pf_pfil_detach(void)
3150 {
3151 	struct pfil_head *ph_inet;
3152 #ifdef INET6
3153 	struct pfil_head *ph_inet6;
3154 #endif
3155 	int i;
3156 
3157 	if (pf_pfil_attached == 0)
3158 		return (0);
3159 
3160 	for (i = 0; i < if_indexlim; i++)
3161 		if (pfi_index2kif[i])
3162 			pfi_detach_ifnet(ifindex2ifnet[i]);
3163 
3164 	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3165 	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3166 
3167 	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3168 	if (ph_inet)
3169 		pfil_remove_hook((void *)pfil4_wrapper, NULL,
3170 		    PFIL_IN|PFIL_OUT, ph_inet);
3171 #ifdef INET6
3172 	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3173 	if (ph_inet6)
3174 		pfil_remove_hook((void *)pfil6_wrapper, NULL,
3175 		    PFIL_IN|PFIL_OUT, ph_inet6);
3176 #endif
3177 	pf_pfil_attached = 0;
3178 
3179 	return (0);
3180 }
3181 #endif
3182