xref: /netbsd-src/sys/dist/pf/net/pf_ioctl.c (revision 1c9b56c830954ccf3b57004ac65562e3d6afacf6)
1 /*	$NetBSD: pf_ioctl.c,v 1.15 2005/02/14 21:28:33 peter Exp $	*/
2 /*	$OpenBSD: pf_ioctl.c,v 1.130.2.1 2004/12/19 19:01:50 brad Exp $ */
3 
4 /*
5  * Copyright (c) 2001 Daniel Hartmeier
6  * Copyright (c) 2002,2003 Henning Brauer
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  */
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_altq.h"
42 #include "opt_pfil_hooks.h"
43 #endif
44 
45 #ifdef __OpenBSD__
46 #include "pfsync.h"
47 #else
48 #define	NPFSYNC	0
49 #endif
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/kernel.h>
59 #include <sys/time.h>
60 #ifdef __OpenBSD__
61 #include <sys/timeout.h>
62 #else
63 #include <sys/callout.h>
64 #endif
65 #include <sys/pool.h>
66 #include <sys/malloc.h>
67 #ifdef __NetBSD__
68 #include <sys/conf.h>
69 #endif
70 
71 #include <net/if.h>
72 #include <net/if_types.h>
73 #include <net/route.h>
74 
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/ip.h>
79 #include <netinet/ip_var.h>
80 #include <netinet/ip_icmp.h>
81 
82 #ifdef __OpenBSD__
83 #include <dev/rndvar.h>
84 #endif
85 #include <net/pfvar.h>
86 
87 #if NPFSYNC > 0
88 #include <net/if_pfsync.h>
89 #endif /* NPFSYNC > 0 */
90 
91 #ifdef INET6
92 #include <netinet/ip6.h>
93 #include <netinet/in_pcb.h>
94 #endif /* INET6 */
95 
96 #ifdef ALTQ
97 #include <altq/altq.h>
98 #endif
99 
100 void			 pfattach(int);
101 #ifdef _LKM
102 void			 pfdetach(void);
103 #endif
104 int			 pfopen(dev_t, int, int, struct proc *);
105 int			 pfclose(dev_t, int, int, struct proc *);
106 struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
107 			    u_int8_t, u_int8_t, u_int8_t);
108 int			 pf_get_ruleset_number(u_int8_t);
109 void			 pf_init_ruleset(struct pf_ruleset *);
110 int			 pf_anchor_setup(struct pf_rule *,
111 			    const struct pf_ruleset *, const char *);
112 int			 pf_anchor_copyout(const struct pf_ruleset *,
113 			    const struct pf_rule *, struct pfioc_rule *);
114 void			 pf_anchor_remove(struct pf_rule *);
115 
116 void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
117 void			 pf_empty_pool(struct pf_palist *);
118 int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
119 #ifdef ALTQ
120 int			 pf_begin_altq(u_int32_t *);
121 int			 pf_rollback_altq(u_int32_t);
122 int			 pf_commit_altq(u_int32_t);
123 int			 pf_enable_altq(struct pf_altq *);
124 int			 pf_disable_altq(struct pf_altq *);
125 #endif /* ALTQ */
126 int			 pf_begin_rules(u_int32_t *, int, const char *);
127 int			 pf_rollback_rules(u_int32_t, int, char *);
128 int			 pf_commit_rules(u_int32_t, int, char *);
129 
130 #ifdef __NetBSD__
131 const struct cdevsw pf_cdevsw = {
132 	pfopen, pfclose, noread, nowrite, pfioctl,
133 	nostop, notty, nopoll, nommap, nokqfilter,
134 };
135 
136 static int pf_pfil_attach(void);
137 static int pf_pfil_detach(void);
138 
139 static int pf_pfil_attached = 0;
140 #endif
141 
142 #ifdef __OpenBSD__
143 extern struct timeout	 pf_expire_to;
144 #else
145 extern struct callout	 pf_expire_to;
146 #endif
147 
148 struct pf_rule		 pf_default_rule;
149 #ifdef ALTQ
150 static int		 pf_altq_running;
151 #endif
152 
153 #define	TAGID_MAX	 50000
154 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
155 				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
156 
157 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
158 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
159 #endif
160 static u_int16_t	 tagname2tag(struct pf_tags *, char *);
161 static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
162 static void		 tag_unref(struct pf_tags *, u_int16_t);
163 
164 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
165 
166 #ifdef __NetBSD__
167 extern struct pfil_head if_pfil;
168 #endif
169 
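/*
 * pfattach() is called at attach time: it sets up the rule, source-node,
 * state, altq and pool-address memory pools, the table/interface/OS-
 * fingerprint subsystems, the main ruleset and the default (pass) rule
 * with its timeouts, and arms pf_expire_to so that pf_purge_timeout()
 * runs every PFTM_INTERVAL seconds.
 */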
170 void
171 pfattach(int num)
172 {
173 	u_int32_t *timeout = pf_default_rule.timeout;
174 
175 	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
176 	    &pool_allocator_nointr);
177 	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
178 	    "pfsrctrpl", NULL);
179 	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
180 	    NULL);
181 	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
182 	    &pool_allocator_nointr);
183 	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
184 	    "pfpooladdrpl", &pool_allocator_nointr);
185 	pfr_initialize();
186 	pfi_initialize();
187 	pf_osfp_initialize();
188 
189 	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
190 	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
191 
192 	RB_INIT(&tree_src_tracking);
193 	RB_INIT(&pf_anchors);
194 	pf_init_ruleset(&pf_main_ruleset);
195 	TAILQ_INIT(&pf_altqs[0]);
196 	TAILQ_INIT(&pf_altqs[1]);
197 	TAILQ_INIT(&pf_pabuf);
198 	pf_altqs_active = &pf_altqs[0];
199 	pf_altqs_inactive = &pf_altqs[1];
200 	TAILQ_INIT(&state_updates);
201 
202 	/* default rule should never be garbage collected */
203 	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
204 	pf_default_rule.action = PF_PASS;
205 	pf_default_rule.nr = -1;
206 
207 	/* initialize default timeouts */
208 	timeout[PFTM_TCP_FIRST_PACKET] = 120;		/* First TCP packet */
209 	timeout[PFTM_TCP_OPENING] = 30;			/* No response yet */
210 	timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;	/* Established */
211 	timeout[PFTM_TCP_CLOSING] = 15 * 60;		/* Half closed */
212 	timeout[PFTM_TCP_FIN_WAIT] = 45;		/* Got both FINs */
213 	timeout[PFTM_TCP_CLOSED] = 90;			/* Got a RST */
214 	timeout[PFTM_UDP_FIRST_PACKET] = 60;		/* First UDP packet */
215 	timeout[PFTM_UDP_SINGLE] = 30;			/* Unidirectional */
216 	timeout[PFTM_UDP_MULTIPLE] = 60;		/* Bidirectional */
217 	timeout[PFTM_ICMP_FIRST_PACKET] = 20;		/* First ICMP packet */
218 	timeout[PFTM_ICMP_ERROR_REPLY] = 10;		/* Got error response */
219 	timeout[PFTM_OTHER_FIRST_PACKET] = 60;		/* First packet */
220 	timeout[PFTM_OTHER_SINGLE] = 30;		/* Unidirectional */
221 	timeout[PFTM_OTHER_MULTIPLE] = 60;		/* Bidirectional */
222 	timeout[PFTM_FRAG] = 30;			/* Fragment expire */
223 	timeout[PFTM_INTERVAL] = 10;			/* Expire interval */
224 	timeout[PFTM_SRC_NODE] = 0;			/* Source tracking */
225 	timeout[PFTM_TS_DIFF] = 30;			/* Allowed TS diff */
226 
227 #ifdef __OpenBSD__
228 	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
229 	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
230 #else
231 	callout_init(&pf_expire_to);
232 	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
233 	    pf_purge_timeout, &pf_expire_to);
234 #endif
235 
236 	pf_normalize_init();
237 	bzero(&pf_status, sizeof(pf_status));
238 	pf_status.debug = PF_DEBUG_URGENT;
239 
240 	/* XXX do our best to avoid a conflict */
241 	pf_status.hostid = arc4random();
242 }
243 
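/*
 * pfdetach() undoes pfattach() when pf is unloaded as an LKM: it detaches
 * the pfil hooks, stops the expiry callout, flushes all rulesets, altqs,
 * states, source nodes, tables and anchors, and finally destroys the
 * pools and the pf subsystems.
 */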
244 #ifdef _LKM
245 void
246 pfdetach(void)
247 {
248 	struct pf_anchor	*anchor;
249 	struct pf_state		*state;
250 	struct pf_src_node	*node;
251 	struct pfioc_table	 pt;
252 	u_int32_t		 ticket;
253 	int			 i;
254 	char			 r = '\0';
255 
256 	(void)pf_pfil_detach();
257 
258 	callout_stop(&pf_expire_to);
259 	pf_status.running = 0;
260 
261 	/* clear the rulesets */
262 	for (i = 0; i < PF_RULESET_MAX; i++)
263 		if (pf_begin_rules(&ticket, i, &r) == 0)
264 			pf_commit_rules(ticket, i, &r);
265 #ifdef ALTQ
266 	if (pf_begin_altq(&ticket) == 0)
267 		pf_commit_altq(ticket);
268 #endif
269 
270 	/* clear states */
271 	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
272 		state->timeout = PFTM_PURGE;
273 #if NPFSYNC
274 		state->sync_flags = PFSTATE_NOSYNC;
275 #endif
276 	}
277 	pf_purge_expired_states();
278 #if NPFSYNC
279 	pfsync_clear_states(pf_status.hostid, NULL);
280 #endif
281 
282 	/* clear source nodes */
283 	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
284 		state->src_node = NULL;
285 		state->nat_src_node = NULL;
286 	}
287 	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
288 		node->expire = 1;
289 		node->states = 0;
290 	}
291 	pf_purge_expired_src_nodes();
292 
293 	/* clear tables */
294 	memset(&pt, '\0', sizeof(pt));
295 	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);
296 
297 	/* destroy anchors */
298 	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
299 		for (i = 0; i < PF_RULESET_MAX; i++)
300 			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
301 				pf_commit_rules(ticket, i, anchor->name);
302 	}
303 
304 	/* destroy main ruleset */
305 	pf_remove_if_empty_ruleset(&pf_main_ruleset);
306 
307 	/* destroy the pools */
308 	pool_destroy(&pf_pooladdr_pl);
309 	pool_destroy(&pf_altq_pl);
310 	pool_destroy(&pf_state_pl);
311 	pool_destroy(&pf_rule_pl);
312 	pool_destroy(&pf_src_tree_pl);
313 
314 	/* destroy subsystems */
315 	pf_normalize_destroy();
316 	pf_osfp_destroy();
317 	pfr_destroy();
318 	pfi_destroy();
319 }
320 #endif
321 
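/*
 * /dev/pf has a single minor device; open and close only reject any
 * other minor number.
 */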
322 int
323 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
324 {
325 	if (minor(dev) >= 1)
326 		return (ENXIO);
327 	return (0);
328 }
329 
330 int
331 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
332 {
333 	if (minor(dev) >= 1)
334 		return (ENXIO);
335 	return (0);
336 }
337 
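/*
 * pf_get_pool() returns the address pool of one rule: the anchor and the
 * rule action select the ruleset, active/inactive plus (optionally) the
 * ticket select the rule queue, and the rule is picked either by number
 * or, with r_last set, as the last rule in the queue.
 */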
338 struct pf_pool *
339 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
340     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
341     u_int8_t check_ticket)
342 {
343 	struct pf_ruleset	*ruleset;
344 	struct pf_rule		*rule;
345 	int			 rs_num;
346 
347 	ruleset = pf_find_ruleset(anchor);
348 	if (ruleset == NULL)
349 		return (NULL);
350 	rs_num = pf_get_ruleset_number(rule_action);
351 	if (rs_num >= PF_RULESET_MAX)
352 		return (NULL);
353 	if (active) {
354 		if (check_ticket && ticket !=
355 		    ruleset->rules[rs_num].active.ticket)
356 			return (NULL);
357 		if (r_last)
358 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
359 			    pf_rulequeue);
360 		else
361 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
362 	} else {
363 		if (check_ticket && ticket !=
364 		    ruleset->rules[rs_num].inactive.ticket)
365 			return (NULL);
366 		if (r_last)
367 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
368 			    pf_rulequeue);
369 		else
370 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
371 	}
372 	if (!r_last) {
373 		while ((rule != NULL) && (rule->nr != rule_number))
374 			rule = TAILQ_NEXT(rule, entries);
375 	}
376 	if (rule == NULL)
377 		return (NULL);
378 
379 	return (&rule->rpool);
380 }
381 
382 int
383 pf_get_ruleset_number(u_int8_t action)
384 {
385 	switch (action) {
386 	case PF_SCRUB:
387 		return (PF_RULESET_SCRUB);
388 		break;
389 	case PF_PASS:
390 	case PF_DROP:
391 		return (PF_RULESET_FILTER);
392 		break;
393 	case PF_NAT:
394 	case PF_NONAT:
395 		return (PF_RULESET_NAT);
396 		break;
397 	case PF_BINAT:
398 	case PF_NOBINAT:
399 		return (PF_RULESET_BINAT);
400 		break;
401 	case PF_RDR:
402 	case PF_NORDR:
403 		return (PF_RULESET_RDR);
404 		break;
405 	default:
406 		return (PF_RULESET_MAX);
407 		break;
408 	}
409 }
410 
411 void
412 pf_init_ruleset(struct pf_ruleset *ruleset)
413 {
414 	int	i;
415 
416 	memset(ruleset, 0, sizeof(struct pf_ruleset));
417 	for (i = 0; i < PF_RULESET_MAX; i++) {
418 		TAILQ_INIT(&ruleset->rules[i].queues[0]);
419 		TAILQ_INIT(&ruleset->rules[i].queues[1]);
420 		ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
421 		ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
422 	}
423 }
424 
425 struct pf_anchor *
426 pf_find_anchor(const char *path)
427 {
428 	static struct pf_anchor	 key;
429 
430 	memset(&key, 0, sizeof(key));
431 	strlcpy(key.path, path, sizeof(key.path));
432 	return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
433 }
434 
435 struct pf_ruleset *
436 pf_find_ruleset(const char *path)
437 {
438 	struct pf_anchor	*anchor;
439 
440 	while (*path == '/')
441 		path++;
442 	if (!*path)
443 		return (&pf_main_ruleset);
444 	anchor = pf_find_anchor(path);
445 	if (anchor == NULL)
446 		return (NULL);
447 	else
448 		return (&anchor->ruleset);
449 }
450 
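/*
 * pf_find_or_create_ruleset() resolves a '/'-separated anchor path,
 * allocating any anchors that do not exist yet and linking them into the
 * global tree and their parent's children tree.  It returns NULL on
 * allocation failure, on a name collision, or when a component name or
 * the resulting path would be too long.
 */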
451 struct pf_ruleset *
452 pf_find_or_create_ruleset(const char *path)
453 {
454 	static char		 p[MAXPATHLEN];
455 	char			*q, *r;
456 	struct pf_ruleset	*ruleset;
457 	struct pf_anchor	*anchor = NULL /* XXX gcc */,
458 				*dup, *parent = NULL;
459 
460 	while (*path == '/')
461 		path++;
462 	ruleset = pf_find_ruleset(path);
463 	if (ruleset != NULL)
464 		return (ruleset);
465 	strlcpy(p, path, sizeof(p));
466 	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
467 		*q = 0;
468 		if ((ruleset = pf_find_ruleset(p)) != NULL) {
469 			parent = ruleset->anchor;
470 			break;
471 		}
472 	}
473 	if (q == NULL)
474 		q = p;
475 	else
476 		q++;
477 	strlcpy(p, path, sizeof(p));
478 	if (!*q)
479 		return (NULL);
480 	while ((r = strchr(q, '/')) != NULL || *q) {
481 		if (r != NULL)
482 			*r = 0;
483 		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
484 		    (parent != NULL && strlen(parent->path) >=
485 		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
486 			return (NULL);
487 		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
488 		    M_NOWAIT);
489 		if (anchor == NULL)
490 			return (NULL);
491 		memset(anchor, 0, sizeof(*anchor));
492 		RB_INIT(&anchor->children);
493 		strlcpy(anchor->name, q, sizeof(anchor->name));
494 		if (parent != NULL) {
495 			strlcpy(anchor->path, parent->path,
496 			    sizeof(anchor->path));
497 			strlcat(anchor->path, "/", sizeof(anchor->path));
498 		}
499 		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
500 		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
501 		    NULL) {
502 			printf("pf_find_or_create_ruleset: RB_INSERT1 "
503 			    "'%s' '%s' collides with '%s' '%s'\n",
504 			    anchor->path, anchor->name, dup->path, dup->name);
505 			free(anchor, M_TEMP);
506 			return (NULL);
507 		}
508 		if (parent != NULL) {
509 			anchor->parent = parent;
510 			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
511 			    anchor)) != NULL) {
512 				printf("pf_find_or_create_ruleset: "
513 				    "RB_INSERT2 '%s' '%s' collides with "
514 				    "'%s' '%s'\n", anchor->path, anchor->name,
515 				    dup->path, dup->name);
516 				RB_REMOVE(pf_anchor_global, &pf_anchors,
517 				    anchor);
518 				free(anchor, M_TEMP);
519 				return (NULL);
520 			}
521 		}
522 		pf_init_ruleset(&anchor->ruleset);
523 		anchor->ruleset.anchor = anchor;
524 		parent = anchor;
525 		if (r != NULL)
526 			q = r + 1;
527 		else
528 			*q = 0;
529 	}
530 	return (&anchor->ruleset);
531 }
532 
533 void
534 pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
535 {
536 	struct pf_anchor	*parent;
537 	int			 i;
538 
539 	while (ruleset != NULL) {
540 		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
541 		    !RB_EMPTY(&ruleset->anchor->children) ||
542 		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
543 		    ruleset->topen)
544 			return;
545 		for (i = 0; i < PF_RULESET_MAX; ++i)
546 			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
547 			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
548 			    ruleset->rules[i].inactive.open)
549 				return;
550 		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
551 		if ((parent = ruleset->anchor->parent) != NULL)
552 			RB_REMOVE(pf_anchor_node, &parent->children,
553 			    ruleset->anchor);
554 		free(ruleset->anchor, M_TEMP);
555 		if (parent == NULL)
556 			return;
557 		ruleset = &parent->ruleset;
558 	}
559 }
560 
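/*
 * pf_anchor_setup() resolves the anchor name of a rule relative to the
 * ruleset it is loaded into.  Names starting with '/' are absolute;
 * relative names may climb with "../" (counted in anchor_relative), and
 * a trailing wildcard component makes anchor_wildcard non-zero.  As an
 * illustration (hypothetical names), "../ftp" loaded into ruleset
 * "spam/relay" resolves to anchor "spam/ftp" with anchor_relative == 2.
 * The target ruleset is created if necessary and the target anchor's
 * refcnt is bumped.
 */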
561 int
562 pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
563     const char *name)
564 {
565 	static char		*p, path[MAXPATHLEN];
566 	struct pf_ruleset	*ruleset;
567 
568 	r->anchor = NULL;
569 	r->anchor_relative = 0;
570 	r->anchor_wildcard = 0;
571 	if (!name[0])
572 		return (0);
573 	if (name[0] == '/')
574 		strlcpy(path, name + 1, sizeof(path));
575 	else {
576 		/* relative path */
577 		r->anchor_relative = 1;
578 		if (s->anchor == NULL || !s->anchor->path[0])
579 			path[0] = 0;
580 		else
581 			strlcpy(path, s->anchor->path, sizeof(path));
582 		while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
583 			if (!path[0]) {
584 				printf("pf_anchor_setup: .. beyond root\n");
585 				return (1);
586 			}
587 			if ((p = strrchr(path, '/')) != NULL)
588 				*p = 0;
589 			else
590 				path[0] = 0;
591 			r->anchor_relative++;
592 			name += 3;
593 		}
594 		if (path[0])
595 			strlcat(path, "/", sizeof(path));
596 		strlcat(path, name, sizeof(path));
597 	}
598 	if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
599 		r->anchor_wildcard = 1;
600 		*p = 0;
601 	}
602 	ruleset = pf_find_or_create_ruleset(path);
603 	if (ruleset == NULL || ruleset->anchor == NULL) {
604 		printf("pf_anchor_setup: ruleset\n");
605 		return (1);
606 	}
607 	r->anchor = ruleset->anchor;
608 	r->anchor->refcnt++;
609 	return (0);
610 }
611 
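/*
 * pf_anchor_copyout() is the inverse of pf_anchor_setup(): it rebuilds
 * the anchor call string handed back to userland, prefixing one "../"
 * for each level the original name climbed and appending the wildcard
 * suffix when anchor_wildcard is set.  It returns 1 if the stored paths
 * are inconsistent.
 */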
612 int
613 pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
614     struct pfioc_rule *pr)
615 {
616 	pr->anchor_call[0] = 0;
617 	if (r->anchor == NULL)
618 		return (0);
619 	if (!r->anchor_relative) {
620 		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
621 		strlcat(pr->anchor_call, r->anchor->path,
622 		    sizeof(pr->anchor_call));
623 	} else {
624 		char a[MAXPATHLEN], b[MAXPATHLEN], *p;
625 		int i;
626 
627 		if (rs->anchor == NULL)
628 			a[0] = 0;
629 		else
630 			strlcpy(a, rs->anchor->path, sizeof(a));
631 		strlcpy(b, r->anchor->path, sizeof(b));
632 		for (i = 1; i < r->anchor_relative; ++i) {
633 			if ((p = strrchr(a, '/')) == NULL)
634 				p = a;
635 			*p = 0;
636 			strlcat(pr->anchor_call, "../",
637 			    sizeof(pr->anchor_call));
638 		}
639 		if (strncmp(a, b, strlen(a))) {
640 			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
641 			return (1);
642 		}
643 		if (strlen(b) > strlen(a))
644 			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
645 			    sizeof(pr->anchor_call));
646 	}
647 	if (r->anchor_wildcard)
648 		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
649 		    sizeof(pr->anchor_call));
650 	return (0);
651 }
652 
653 void
654 pf_anchor_remove(struct pf_rule *r)
655 {
656 	if (r->anchor == NULL)
657 		return;
658 	if (r->anchor->refcnt <= 0) {
659 		printf("pf_anchor_remove: broken refcount\n");
660 		r->anchor = NULL;
661 		return;
662 	}
663 	if (!--r->anchor->refcnt)
664 		pf_remove_if_empty_ruleset(&r->anchor->ruleset);
665 	r->anchor = NULL;
666 }
667 
668 void
669 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
670 {
671 	struct pf_pooladdr	*mv_pool_pa;
672 
673 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
674 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
675 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
676 	}
677 }
678 
679 void
680 pf_empty_pool(struct pf_palist *poola)
681 {
682 	struct pf_pooladdr	*empty_pool_pa;
683 
684 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
685 		pfi_dynaddr_remove(&empty_pool_pa->addr);
686 		pf_tbladdr_remove(&empty_pool_pa->addr);
687 		pfi_detach_rule(empty_pool_pa->kif);
688 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
689 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
690 	}
691 }
692 
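/*
 * pf_rm_rule() unlinks a rule from the given queue (if any) and, once no
 * states or source nodes reference it and it is no longer queued,
 * releases its tags, queue ids, dynamic and table addresses, interface,
 * anchor and address pool before returning it to the rule pool.
 */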
693 void
694 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
695 {
696 	if (rulequeue != NULL) {
697 		if (rule->states <= 0) {
698 			/*
699 			 * XXX - we need to remove the table *before* detaching
700 			 * the rule to make sure the table code does not delete
701 			 * the anchor under our feet.
702 			 */
703 			pf_tbladdr_remove(&rule->src.addr);
704 			pf_tbladdr_remove(&rule->dst.addr);
705 		}
706 		TAILQ_REMOVE(rulequeue, rule, entries);
707 		rule->entries.tqe_prev = NULL;
708 		rule->nr = -1;
709 	}
710 
711 	if (rule->states > 0 || rule->src_nodes > 0 ||
712 	    rule->entries.tqe_prev != NULL)
713 		return;
714 	pf_tag_unref(rule->tag);
715 	pf_tag_unref(rule->match_tag);
716 #ifdef ALTQ
717 	if (rule->pqid != rule->qid)
718 		pf_qid_unref(rule->pqid);
719 	pf_qid_unref(rule->qid);
720 #endif
721 	pfi_dynaddr_remove(&rule->src.addr);
722 	pfi_dynaddr_remove(&rule->dst.addr);
723 	if (rulequeue == NULL) {
724 		pf_tbladdr_remove(&rule->src.addr);
725 		pf_tbladdr_remove(&rule->dst.addr);
726 	}
727 	pfi_detach_rule(rule->kif);
728 	pf_anchor_remove(rule);
729 	pf_empty_pool(&rule->rpool.list);
730 	pool_put(&pf_rule_pl, rule);
731 }
732 
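/*
 * Tag (and, with ALTQ, queue) names are mapped to small integer ids.
 * tagname2tag() returns an existing id with its refcount bumped, or
 * allocates the lowest unused id; tag2tagname() and tag_unref() do the
 * reverse lookup and the release.
 */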
733 static	u_int16_t
734 tagname2tag(struct pf_tags *head, char *tagname)
735 {
736 	struct pf_tagname	*tag, *p = NULL;
737 	u_int16_t		 new_tagid = 1;
738 
739 	TAILQ_FOREACH(tag, head, entries)
740 		if (strcmp(tagname, tag->name) == 0) {
741 			tag->ref++;
742 			return (tag->tag);
743 		}
744 
745 	/*
746 	 * to avoid fragmentation, we do a linear search from the beginning
747 	 * and take the first free slot we find. if there is none or the list
748 	 * is empty, append a new entry at the end.
749 	 */
750 
751 	/* new entry */
752 	if (!TAILQ_EMPTY(head))
753 		for (p = TAILQ_FIRST(head); p != NULL &&
754 		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
755 			new_tagid = p->tag + 1;
756 
757 	if (new_tagid > TAGID_MAX)
758 		return (0);
759 
760 	/* allocate and fill new struct pf_tagname */
761 	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
762 	    M_TEMP, M_NOWAIT);
763 	if (tag == NULL)
764 		return (0);
765 	bzero(tag, sizeof(struct pf_tagname));
766 	strlcpy(tag->name, tagname, sizeof(tag->name));
767 	tag->tag = new_tagid;
768 	tag->ref++;
769 
770 	if (p != NULL)	/* insert new entry before p */
771 		TAILQ_INSERT_BEFORE(p, tag, entries);
772 	else	/* either list empty or no free slot in between */
773 		TAILQ_INSERT_TAIL(head, tag, entries);
774 
775 	return (tag->tag);
776 }
777 
778 static	void
779 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
780 {
781 	struct pf_tagname	*tag;
782 
783 	TAILQ_FOREACH(tag, head, entries)
784 		if (tag->tag == tagid) {
785 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
786 			return;
787 		}
788 }
789 
790 static	void
791 tag_unref(struct pf_tags *head, u_int16_t tag)
792 {
793 	struct pf_tagname	*p, *next;
794 
795 	if (tag == 0)
796 		return;
797 
798 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
799 		next = TAILQ_NEXT(p, entries);
800 		if (tag == p->tag) {
801 			if (--p->ref == 0) {
802 				TAILQ_REMOVE(head, p, entries);
803 				free(p, M_TEMP);
804 			}
805 			break;
806 		}
807 	}
808 }
809 
810 u_int16_t
811 pf_tagname2tag(char *tagname)
812 {
813 	return (tagname2tag(&pf_tags, tagname));
814 }
815 
816 void
817 pf_tag2tagname(u_int16_t tagid, char *p)
818 {
819 	return (tag2tagname(&pf_tags, tagid, p));
820 }
821 
822 void
823 pf_tag_unref(u_int16_t tag)
824 {
825 	return (tag_unref(&pf_tags, tag));
826 }
827 
828 #ifdef ALTQ
829 u_int32_t
830 pf_qname2qid(char *qname)
831 {
832 	return ((u_int32_t)tagname2tag(&pf_qids, qname));
833 }
834 
835 void
836 pf_qid2qname(u_int32_t qid, char *p)
837 {
838 	return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
839 }
840 
841 void
842 pf_qid_unref(u_int32_t qid)
843 {
844 	return (tag_unref(&pf_qids, (u_int16_t)qid));
845 }
846 
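/*
 * Altq configuration is transactional: pf_begin_altq() empties the
 * inactive list and hands out a ticket, pf_rollback_altq() throws the
 * inactive list away again, and pf_commit_altq() swaps the active and
 * inactive lists at splsoftnet, attaches and enables the new disciplines
 * and tears down the old ones.
 */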
847 int
848 pf_begin_altq(u_int32_t *ticket)
849 {
850 	struct pf_altq	*altq;
851 	int		 error = 0;
852 
853 	/* Purge the old altq list */
854 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
855 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
856 		if (altq->qname[0] == 0) {
857 			/* detach and destroy the discipline */
858 			error = altq_remove(altq);
859 		} else
860 			pf_qid_unref(altq->qid);
861 		pool_put(&pf_altq_pl, altq);
862 	}
863 	if (error)
864 		return (error);
865 	*ticket = ++ticket_altqs_inactive;
866 	altqs_inactive_open = 1;
867 	return (0);
868 }
869 
870 int
871 pf_rollback_altq(u_int32_t ticket)
872 {
873 	struct pf_altq	*altq;
874 	int		 error = 0;
875 
876 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
877 		return (0);
878 	/* Purge the old altq list */
879 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
880 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
881 		if (altq->qname[0] == 0) {
882 			/* detach and destroy the discipline */
883 			error = altq_remove(altq);
884 		} else
885 			pf_qid_unref(altq->qid);
886 		pool_put(&pf_altq_pl, altq);
887 	}
888 	altqs_inactive_open = 0;
889 	return (error);
890 }
891 
892 int
893 pf_commit_altq(u_int32_t ticket)
894 {
895 	struct pf_altqqueue	*old_altqs;
896 	struct pf_altq		*altq;
897 	int			 s, err, error = 0;
898 
899 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
900 		return (EBUSY);
901 
902 	/* swap altqs, keep the old. */
903 	s = splsoftnet();
904 	old_altqs = pf_altqs_active;
905 	pf_altqs_active = pf_altqs_inactive;
906 	pf_altqs_inactive = old_altqs;
907 	ticket_altqs_active = ticket_altqs_inactive;
908 
909 	/* Attach new disciplines */
910 	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
911 		if (altq->qname[0] == 0) {
912 			/* attach the discipline */
913 			error = altq_pfattach(altq);
914 			if (error == 0 && pf_altq_running)
915 				error = pf_enable_altq(altq);
916 			if (error != 0) {
917 				splx(s);
918 				return (error);
919 			}
920 		}
921 	}
922 
923 	/* Purge the old altq list */
924 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
925 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
926 		if (altq->qname[0] == 0) {
927 			/* detach and destroy the discipline */
928 			if (pf_altq_running)
929 				error = pf_disable_altq(altq);
930 			err = altq_pfdetach(altq);
931 			if (err != 0 && error == 0)
932 				error = err;
933 			err = altq_remove(altq);
934 			if (err != 0 && error == 0)
935 				error = err;
936 		} else
937 			pf_qid_unref(altq->qid);
938 		pool_put(&pf_altq_pl, altq);
939 	}
940 	splx(s);
941 
942 	altqs_inactive_open = 0;
943 	return (error);
944 }
945 
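/*
 * pf_enable_altq() and pf_disable_altq() turn queueing on or off on the
 * send queue of the interface named in the pf_altq entry, programming or
 * clearing the token bucket regulator (tbr_set) from its ifbandwidth and
 * tbrsize parameters.
 */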
946 int
947 pf_enable_altq(struct pf_altq *altq)
948 {
949 	struct ifnet		*ifp;
950 	struct tb_profile	 tb;
951 	int			 s, error = 0;
952 
953 	if ((ifp = ifunit(altq->ifname)) == NULL)
954 		return (EINVAL);
955 
956 	if (ifp->if_snd.altq_type != ALTQT_NONE)
957 		error = altq_enable(&ifp->if_snd);
958 
959 	/* set tokenbucket regulator */
960 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
961 		tb.rate = altq->ifbandwidth;
962 		tb.depth = altq->tbrsize;
963 		s = splimp();
964 		error = tbr_set(&ifp->if_snd, &tb);
965 		splx(s);
966 	}
967 
968 	return (error);
969 }
970 
971 int
972 pf_disable_altq(struct pf_altq *altq)
973 {
974 	struct ifnet		*ifp;
975 	struct tb_profile	 tb;
976 	int			 s, error;
977 
978 	if ((ifp = ifunit(altq->ifname)) == NULL)
979 		return (EINVAL);
980 
981 	/*
982 	 * If the discipline is no longer referenced, it has been overridden
983 	 * by a new one; in that case just return.
984 	 */
985 	if (altq->altq_disc != ifp->if_snd.altq_disc)
986 		return (0);
987 
988 	error = altq_disable(&ifp->if_snd);
989 
990 	if (error == 0) {
991 		/* clear tokenbucket regulator */
992 		tb.rate = 0;
993 		s = splimp();
994 		error = tbr_set(&ifp->if_snd, &tb);
995 		splx(s);
996 	}
997 
998 	return (error);
999 }
1000 #endif /* ALTQ */
1001 
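/*
 * Rulesets are loaded with the same ticket scheme: pf_begin_rules()
 * empties the inactive queue of the (possibly freshly created) ruleset
 * and returns a new ticket, pf_rollback_rules() discards the inactive
 * queue if the ticket still matches, and pf_commit_rules() swaps the
 * active and inactive queues at splsoftnet, recalculates the skip steps
 * and frees the old rules.
 */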
1002 int
1003 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1004 {
1005 	struct pf_ruleset	*rs;
1006 	struct pf_rule		*rule;
1007 
1008 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1009 		return (EINVAL);
1010 	rs = pf_find_or_create_ruleset(anchor);
1011 	if (rs == NULL)
1012 		return (EINVAL);
1013 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1014 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1015 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1016 	rs->rules[rs_num].inactive.open = 1;
1017 	return (0);
1018 }
1019 
1020 int
1021 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1022 {
1023 	struct pf_ruleset	*rs;
1024 	struct pf_rule		*rule;
1025 
1026 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1027 		return (EINVAL);
1028 	rs = pf_find_ruleset(anchor);
1029 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1030 	    rs->rules[rs_num].inactive.ticket != ticket)
1031 		return (0);
1032 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1033 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1034 	rs->rules[rs_num].inactive.open = 0;
1035 	return (0);
1036 }
1037 
1038 int
1039 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1040 {
1041 	struct pf_ruleset	*rs;
1042 	struct pf_rule		*rule;
1043 	struct pf_rulequeue	*old_rules;
1044 	int			 s;
1045 
1046 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1047 		return (EINVAL);
1048 	rs = pf_find_ruleset(anchor);
1049 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1050 	    ticket != rs->rules[rs_num].inactive.ticket)
1051 		return (EBUSY);
1052 
1053 	/* Swap rules, keep the old. */
1054 	s = splsoftnet();
1055 	old_rules = rs->rules[rs_num].active.ptr;
1056 	rs->rules[rs_num].active.ptr =
1057 	    rs->rules[rs_num].inactive.ptr;
1058 	rs->rules[rs_num].inactive.ptr = old_rules;
1059 	rs->rules[rs_num].active.ticket =
1060 	    rs->rules[rs_num].inactive.ticket;
1061 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1062 
1063 	/* Purge the old rule list. */
1064 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1065 		pf_rm_rule(old_rules, rule);
1066 	rs->rules[rs_num].inactive.open = 0;
1067 	pf_remove_if_empty_ruleset(rs);
1068 	splx(s);
1069 	return (0);
1070 }
1071 
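/*
 * pfioctl() implements every /dev/pf ioctl.  Requests are first screened
 * against the securelevel and against whether the descriptor was opened
 * for writing, then handled at splsoftnet.  A minimal userland sketch
 * (illustrative only; roughly what pfctl -e does):
 *
 *	int fd = open("/dev/pf", O_RDWR);
 *	if (fd == -1 || ioctl(fd, DIOCSTART) == -1)
 *		err(1, "cannot enable pf");
 */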
1072 int
1073 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1074 {
1075 	struct pf_pooladdr	*pa = NULL;
1076 	struct pf_pool		*pool = NULL;
1077 	int			 s;
1078 	int			 error = 0;
1079 
1080 	/* XXX keep in sync with switch() below */
1081 	if (securelevel > 1)
1082 		switch (cmd) {
1083 		case DIOCGETRULES:
1084 		case DIOCGETRULE:
1085 		case DIOCGETADDRS:
1086 		case DIOCGETADDR:
1087 		case DIOCGETSTATE:
1088 		case DIOCSETSTATUSIF:
1089 		case DIOCGETSTATUS:
1090 		case DIOCCLRSTATUS:
1091 		case DIOCNATLOOK:
1092 		case DIOCSETDEBUG:
1093 		case DIOCGETSTATES:
1094 		case DIOCGETTIMEOUT:
1095 		case DIOCCLRRULECTRS:
1096 		case DIOCGETLIMIT:
1097 		case DIOCGETALTQS:
1098 		case DIOCGETALTQ:
1099 		case DIOCGETQSTATS:
1100 		case DIOCGETRULESETS:
1101 		case DIOCGETRULESET:
1102 		case DIOCRGETTABLES:
1103 		case DIOCRGETTSTATS:
1104 		case DIOCRCLRTSTATS:
1105 		case DIOCRCLRADDRS:
1106 		case DIOCRADDADDRS:
1107 		case DIOCRDELADDRS:
1108 		case DIOCRSETADDRS:
1109 		case DIOCRGETADDRS:
1110 		case DIOCRGETASTATS:
1111 		case DIOCRCLRASTATS:
1112 		case DIOCRTSTADDRS:
1113 		case DIOCOSFPGET:
1114 		case DIOCGETSRCNODES:
1115 		case DIOCCLRSRCNODES:
1116 		case DIOCIGETIFACES:
1117 		case DIOCICLRISTATS:
1118 			break;
1119 		case DIOCRCLRTABLES:
1120 		case DIOCRADDTABLES:
1121 		case DIOCRDELTABLES:
1122 		case DIOCRSETTFLAGS:
1123 			if (((struct pfioc_table *)addr)->pfrio_flags &
1124 			    PFR_FLAG_DUMMY)
1125 				break; /* dummy operation ok */
1126 			return (EPERM);
1127 		default:
1128 			return (EPERM);
1129 		}
1130 
1131 	if (!(flags & FWRITE))
1132 		switch (cmd) {
1133 		case DIOCGETRULES:
1134 		case DIOCGETRULE:
1135 		case DIOCGETADDRS:
1136 		case DIOCGETADDR:
1137 		case DIOCGETSTATE:
1138 		case DIOCGETSTATUS:
1139 		case DIOCGETSTATES:
1140 		case DIOCGETTIMEOUT:
1141 		case DIOCGETLIMIT:
1142 		case DIOCGETALTQS:
1143 		case DIOCGETALTQ:
1144 		case DIOCGETQSTATS:
1145 		case DIOCGETRULESETS:
1146 		case DIOCGETRULESET:
1147 		case DIOCRGETTABLES:
1148 		case DIOCRGETTSTATS:
1149 		case DIOCRGETADDRS:
1150 		case DIOCRGETASTATS:
1151 		case DIOCRTSTADDRS:
1152 		case DIOCOSFPGET:
1153 		case DIOCGETSRCNODES:
1154 		case DIOCIGETIFACES:
1155 			break;
1156 		case DIOCRCLRTABLES:
1157 		case DIOCRADDTABLES:
1158 		case DIOCRDELTABLES:
1159 		case DIOCRCLRTSTATS:
1160 		case DIOCRCLRADDRS:
1161 		case DIOCRADDADDRS:
1162 		case DIOCRDELADDRS:
1163 		case DIOCRSETADDRS:
1164 		case DIOCRSETTFLAGS:
1165 			if (((struct pfioc_table *)addr)->pfrio_flags &
1166 			    PFR_FLAG_DUMMY)
1167 				break; /* dummy operation ok */
1168 			return (EACCES);
1169 		default:
1170 			return (EACCES);
1171 		}
1172 
1173 	s = splsoftnet();
1174 	switch (cmd) {
1175 
1176 	case DIOCSTART:
1177 		if (pf_status.running)
1178 			error = EEXIST;
1179 		else {
1180 #ifdef __NetBSD__
1181 			error = pf_pfil_attach();
1182 			if (error)
1183 				break;
1184 #endif
1185 			pf_status.running = 1;
1186 			pf_status.since = time_second;
1187 			if (pf_status.stateid == 0) {
1188 				pf_status.stateid = time_second;
1189 				pf_status.stateid = pf_status.stateid << 32;
1190 			}
1191 			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1192 		}
1193 		break;
1194 
1195 	case DIOCSTOP:
1196 		if (!pf_status.running)
1197 			error = ENOENT;
1198 		else {
1199 #ifdef __NetBSD__
1200 			error = pf_pfil_detach();
1201 			if (error)
1202 				break;
1203 #endif
1204 			pf_status.running = 0;
1205 			pf_status.since = time_second;
1206 			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1207 		}
1208 		break;
1209 
1210 	case DIOCADDRULE: {
1211 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1212 		struct pf_ruleset	*ruleset;
1213 		struct pf_rule		*rule, *tail;
1214 		struct pf_pooladdr	*pa;
1215 		int			 rs_num;
1216 
1217 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1218 		ruleset = pf_find_ruleset(pr->anchor);
1219 		if (ruleset == NULL) {
1220 			error = EINVAL;
1221 			break;
1222 		}
1223 		rs_num = pf_get_ruleset_number(pr->rule.action);
1224 		if (rs_num >= PF_RULESET_MAX) {
1225 			error = EINVAL;
1226 			break;
1227 		}
1228 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1229 			error = EINVAL;
1230 			break;
1231 		}
1232 		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1233 			error = EBUSY;
1234 			break;
1235 		}
1236 		if (pr->pool_ticket != ticket_pabuf) {
1237 			error = EBUSY;
1238 			break;
1239 		}
1240 		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1241 		if (rule == NULL) {
1242 			error = ENOMEM;
1243 			break;
1244 		}
1245 		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1246 		rule->anchor = NULL;
1247 		rule->kif = NULL;
1248 		TAILQ_INIT(&rule->rpool.list);
1249 		/* initialize refcounting */
1250 		rule->states = 0;
1251 		rule->src_nodes = 0;
1252 		rule->entries.tqe_prev = NULL;
1253 #ifndef INET
1254 		if (rule->af == AF_INET) {
1255 			pool_put(&pf_rule_pl, rule);
1256 			error = EAFNOSUPPORT;
1257 			break;
1258 		}
1259 #endif /* INET */
1260 #ifndef INET6
1261 		if (rule->af == AF_INET6) {
1262 			pool_put(&pf_rule_pl, rule);
1263 			error = EAFNOSUPPORT;
1264 			break;
1265 		}
1266 #endif /* INET6 */
1267 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1268 		    pf_rulequeue);
1269 		if (tail)
1270 			rule->nr = tail->nr + 1;
1271 		else
1272 			rule->nr = 0;
1273 		if (rule->ifname[0]) {
1274 			rule->kif = pfi_attach_rule(rule->ifname);
1275 			if (rule->kif == NULL) {
1276 				pool_put(&pf_rule_pl, rule);
1277 				error = EINVAL;
1278 				break;
1279 			}
1280 		}
1281 
1282 #ifdef ALTQ
1283 		/* set queue IDs */
1284 		if (rule->qname[0] != 0) {
1285 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1286 				error = EBUSY;
1287 			else if (rule->pqname[0] != 0) {
1288 				if ((rule->pqid =
1289 				    pf_qname2qid(rule->pqname)) == 0)
1290 					error = EBUSY;
1291 			} else
1292 				rule->pqid = rule->qid;
1293 		}
1294 #endif
1295 		if (rule->tagname[0])
1296 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1297 				error = EBUSY;
1298 		if (rule->match_tagname[0])
1299 			if ((rule->match_tag =
1300 			    pf_tagname2tag(rule->match_tagname)) == 0)
1301 				error = EBUSY;
1302 		if (rule->rt && !rule->direction)
1303 			error = EINVAL;
1304 		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1305 			error = EINVAL;
1306 		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1307 			error = EINVAL;
1308 		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1309 			error = EINVAL;
1310 		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1311 			error = EINVAL;
1312 		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1313 			error = EINVAL;
1314 		TAILQ_FOREACH(pa, &pf_pabuf, entries)
1315 			if (pf_tbladdr_setup(ruleset, &pa->addr))
1316 				error = EINVAL;
1317 
1318 		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1319 		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1320 		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1321 		    (rule->rt > PF_FASTROUTE)) &&
1322 		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
1323 			error = EINVAL;
1324 
1325 		if (error) {
1326 			pf_rm_rule(NULL, rule);
1327 			break;
1328 		}
1329 		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1330 		rule->evaluations = rule->packets = rule->bytes = 0;
1331 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1332 		    rule, entries);
1333 		break;
1334 	}
1335 
1336 	case DIOCGETRULES: {
1337 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1338 		struct pf_ruleset	*ruleset;
1339 		struct pf_rule		*tail;
1340 		int			 rs_num;
1341 
1342 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1343 		ruleset = pf_find_ruleset(pr->anchor);
1344 		if (ruleset == NULL) {
1345 			error = EINVAL;
1346 			break;
1347 		}
1348 		rs_num = pf_get_ruleset_number(pr->rule.action);
1349 		if (rs_num >= PF_RULESET_MAX) {
1350 			error = EINVAL;
1351 			break;
1352 		}
1353 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1354 		    pf_rulequeue);
1355 		if (tail)
1356 			pr->nr = tail->nr + 1;
1357 		else
1358 			pr->nr = 0;
1359 		pr->ticket = ruleset->rules[rs_num].active.ticket;
1360 		break;
1361 	}
1362 
1363 	case DIOCGETRULE: {
1364 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1365 		struct pf_ruleset	*ruleset;
1366 		struct pf_rule		*rule;
1367 		int			 rs_num, i;
1368 
1369 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1370 		ruleset = pf_find_ruleset(pr->anchor);
1371 		if (ruleset == NULL) {
1372 			error = EINVAL;
1373 			break;
1374 		}
1375 		rs_num = pf_get_ruleset_number(pr->rule.action);
1376 		if (rs_num >= PF_RULESET_MAX) {
1377 			error = EINVAL;
1378 			break;
1379 		}
1380 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1381 			error = EBUSY;
1382 			break;
1383 		}
1384 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1385 		while ((rule != NULL) && (rule->nr != pr->nr))
1386 			rule = TAILQ_NEXT(rule, entries);
1387 		if (rule == NULL) {
1388 			error = EBUSY;
1389 			break;
1390 		}
1391 		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1392 		if (pf_anchor_copyout(ruleset, rule, pr)) {
1393 			error = EBUSY;
1394 			break;
1395 		}
1396 		pfi_dynaddr_copyout(&pr->rule.src.addr);
1397 		pfi_dynaddr_copyout(&pr->rule.dst.addr);
1398 		pf_tbladdr_copyout(&pr->rule.src.addr);
1399 		pf_tbladdr_copyout(&pr->rule.dst.addr);
1400 		for (i = 0; i < PF_SKIP_COUNT; ++i)
1401 			if (rule->skip[i].ptr == NULL)
1402 				pr->rule.skip[i].nr = -1;
1403 			else
1404 				pr->rule.skip[i].nr =
1405 				    rule->skip[i].ptr->nr;
1406 		break;
1407 	}
1408 
1409 	case DIOCCHANGERULE: {
1410 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1411 		struct pf_ruleset	*ruleset;
1412 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1413 		u_int32_t		 nr = 0;
1414 		int			 rs_num;
1415 
1416 		if (!(pcr->action == PF_CHANGE_REMOVE ||
1417 		    pcr->action == PF_CHANGE_GET_TICKET) &&
1418 		    pcr->pool_ticket != ticket_pabuf) {
1419 			error = EBUSY;
1420 			break;
1421 		}
1422 
1423 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1424 		    pcr->action > PF_CHANGE_GET_TICKET) {
1425 			error = EINVAL;
1426 			break;
1427 		}
1428 		ruleset = pf_find_ruleset(pcr->anchor);
1429 		if (ruleset == NULL) {
1430 			error = EINVAL;
1431 			break;
1432 		}
1433 		rs_num = pf_get_ruleset_number(pcr->rule.action);
1434 		if (rs_num >= PF_RULESET_MAX) {
1435 			error = EINVAL;
1436 			break;
1437 		}
1438 
1439 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1440 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1441 			break;
1442 		} else {
1443 			if (pcr->ticket !=
1444 			    ruleset->rules[rs_num].active.ticket) {
1445 				error = EINVAL;
1446 				break;
1447 			}
1448 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1449 				error = EINVAL;
1450 				break;
1451 			}
1452 		}
1453 
1454 		if (pcr->action != PF_CHANGE_REMOVE) {
1455 			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1456 			if (newrule == NULL) {
1457 				error = ENOMEM;
1458 				break;
1459 			}
1460 			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1461 			TAILQ_INIT(&newrule->rpool.list);
1462 			/* initialize refcounting */
1463 			newrule->states = 0;
1464 			newrule->entries.tqe_prev = NULL;
1465 #ifndef INET
1466 			if (newrule->af == AF_INET) {
1467 				pool_put(&pf_rule_pl, newrule);
1468 				error = EAFNOSUPPORT;
1469 				break;
1470 			}
1471 #endif /* INET */
1472 #ifndef INET6
1473 			if (newrule->af == AF_INET6) {
1474 				pool_put(&pf_rule_pl, newrule);
1475 				error = EAFNOSUPPORT;
1476 				break;
1477 			}
1478 #endif /* INET6 */
1479 			if (newrule->ifname[0]) {
1480 				newrule->kif = pfi_attach_rule(newrule->ifname);
1481 				if (newrule->kif == NULL) {
1482 					pool_put(&pf_rule_pl, newrule);
1483 					error = EINVAL;
1484 					break;
1485 				}
1486 			} else
1487 				newrule->kif = NULL;
1488 
1489 #ifdef ALTQ
1490 			/* set queue IDs */
1491 			if (newrule->qname[0] != 0) {
1492 				if ((newrule->qid =
1493 				    pf_qname2qid(newrule->qname)) == 0)
1494 					error = EBUSY;
1495 				else if (newrule->pqname[0] != 0) {
1496 					if ((newrule->pqid =
1497 					    pf_qname2qid(newrule->pqname)) == 0)
1498 						error = EBUSY;
1499 				} else
1500 					newrule->pqid = newrule->qid;
1501 			}
1502 #endif /* ALTQ */
1503 			if (newrule->tagname[0])
1504 				if ((newrule->tag =
1505 				    pf_tagname2tag(newrule->tagname)) == 0)
1506 					error = EBUSY;
1507 			if (newrule->match_tagname[0])
1508 				if ((newrule->match_tag = pf_tagname2tag(
1509 				    newrule->match_tagname)) == 0)
1510 					error = EBUSY;
1511 
1512 			if (newrule->rt && !newrule->direction)
1513 				error = EINVAL;
1514 			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1515 				error = EINVAL;
1516 			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1517 				error = EINVAL;
1518 			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1519 				error = EINVAL;
1520 			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1521 				error = EINVAL;
1522 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1523 				error = EINVAL;
1524 
1525 			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1526 			if (((((newrule->action == PF_NAT) ||
1527 			    (newrule->action == PF_RDR) ||
1528 			    (newrule->action == PF_BINAT) ||
1529 			    (newrule->rt > PF_FASTROUTE)) &&
1530 			    !pcr->anchor[0])) &&
1531 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1532 				error = EINVAL;
1533 
1534 			if (error) {
1535 				pf_rm_rule(NULL, newrule);
1536 				break;
1537 			}
1538 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1539 			newrule->evaluations = newrule->packets = 0;
1540 			newrule->bytes = 0;
1541 		}
1542 		pf_empty_pool(&pf_pabuf);
1543 
1544 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1545 			oldrule = TAILQ_FIRST(
1546 			    ruleset->rules[rs_num].active.ptr);
1547 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1548 			oldrule = TAILQ_LAST(
1549 			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1550 		else {
1551 			oldrule = TAILQ_FIRST(
1552 			    ruleset->rules[rs_num].active.ptr);
1553 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1554 				oldrule = TAILQ_NEXT(oldrule, entries);
1555 			if (oldrule == NULL) {
1556 				if (newrule != NULL)
1557 					pf_rm_rule(NULL, newrule);
1558 				error = EINVAL;
1559 				break;
1560 			}
1561 		}
1562 
1563 		if (pcr->action == PF_CHANGE_REMOVE)
1564 			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1565 		else {
1566 			if (oldrule == NULL)
1567 				TAILQ_INSERT_TAIL(
1568 				    ruleset->rules[rs_num].active.ptr,
1569 				    newrule, entries);
1570 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1571 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1572 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1573 			else
1574 				TAILQ_INSERT_AFTER(
1575 				    ruleset->rules[rs_num].active.ptr,
1576 				    oldrule, newrule, entries);
1577 		}
1578 
1579 		nr = 0;
1580 		TAILQ_FOREACH(oldrule,
1581 		    ruleset->rules[rs_num].active.ptr, entries)
1582 			oldrule->nr = nr++;
1583 
1584 		ruleset->rules[rs_num].active.ticket++;
1585 
1586 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1587 		pf_remove_if_empty_ruleset(ruleset);
1588 
1589 		break;
1590 	}
1591 
1592 	case DIOCCLRSTATES: {
1593 		struct pf_state		*state;
1594 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1595 		int			 killed = 0;
1596 
1597 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1598 			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1599 			    state->u.s.kif->pfik_name)) {
1600 				state->timeout = PFTM_PURGE;
1601 #if NPFSYNC
1602 				/* don't send out individual delete messages */
1603 				state->sync_flags = PFSTATE_NOSYNC;
1604 #endif
1605 				killed++;
1606 			}
1607 		}
1608 		pf_purge_expired_states();
1609 		pf_status.states = 0;
1610 		psk->psk_af = killed;
1611 #if NPFSYNC
1612 		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1613 #endif
1614 		break;
1615 	}
1616 
1617 	case DIOCKILLSTATES: {
1618 		struct pf_state		*state;
1619 		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1620 		int			 killed = 0;
1621 
1622 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1623 			if ((!psk->psk_af || state->af == psk->psk_af)
1624 			    && (!psk->psk_proto || psk->psk_proto ==
1625 			    state->proto) &&
1626 			    PF_MATCHA(psk->psk_src.neg,
1627 			    &psk->psk_src.addr.v.a.addr,
1628 			    &psk->psk_src.addr.v.a.mask,
1629 			    &state->lan.addr, state->af) &&
1630 			    PF_MATCHA(psk->psk_dst.neg,
1631 			    &psk->psk_dst.addr.v.a.addr,
1632 			    &psk->psk_dst.addr.v.a.mask,
1633 			    &state->ext.addr, state->af) &&
1634 			    (psk->psk_src.port_op == 0 ||
1635 			    pf_match_port(psk->psk_src.port_op,
1636 			    psk->psk_src.port[0], psk->psk_src.port[1],
1637 			    state->lan.port)) &&
1638 			    (psk->psk_dst.port_op == 0 ||
1639 			    pf_match_port(psk->psk_dst.port_op,
1640 			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1641 			    state->ext.port)) &&
1642 			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1643 			    state->u.s.kif->pfik_name))) {
1644 				state->timeout = PFTM_PURGE;
1645 				killed++;
1646 			}
1647 		}
1648 		pf_purge_expired_states();
1649 		psk->psk_af = killed;
1650 		break;
1651 	}
1652 
1653 	case DIOCADDSTATE: {
1654 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1655 		struct pf_state		*state;
1656 		struct pfi_kif		*kif;
1657 
1658 		if (ps->state.timeout >= PFTM_MAX &&
1659 		    ps->state.timeout != PFTM_UNTIL_PACKET) {
1660 			error = EINVAL;
1661 			break;
1662 		}
1663 		state = pool_get(&pf_state_pl, PR_NOWAIT);
1664 		if (state == NULL) {
1665 			error = ENOMEM;
1666 			break;
1667 		}
1668 		kif = pfi_lookup_create(ps->state.u.ifname);
1669 		if (kif == NULL) {
1670 			pool_put(&pf_state_pl, state);
1671 			error = ENOENT;
1672 			break;
1673 		}
1674 		bcopy(&ps->state, state, sizeof(struct pf_state));
1675 		bzero(&state->u, sizeof(state->u));
1676 		state->rule.ptr = &pf_default_rule;
1677 		state->nat_rule.ptr = NULL;
1678 		state->anchor.ptr = NULL;
1679 		state->rt_kif = NULL;
1680 		state->creation = time_second;
1681 		state->pfsync_time = 0;
1682 		state->packets[0] = state->packets[1] = 0;
1683 		state->bytes[0] = state->bytes[1] = 0;
1684 
1685 		if (pf_insert_state(kif, state)) {
1686 			pfi_maybe_destroy(kif);
1687 			pool_put(&pf_state_pl, state);
1688 			error = ENOMEM;
1689 		}
1690 		break;
1691 	}
1692 
1693 	case DIOCGETSTATE: {
1694 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1695 		struct pf_state		*state;
1696 		u_int32_t		 nr;
1697 
1698 		nr = 0;
1699 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1700 			if (nr >= ps->nr)
1701 				break;
1702 			nr++;
1703 		}
1704 		if (state == NULL) {
1705 			error = EBUSY;
1706 			break;
1707 		}
1708 		bcopy(state, &ps->state, sizeof(struct pf_state));
1709 		ps->state.rule.nr = state->rule.ptr->nr;
1710 		ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1711 		    -1 : state->nat_rule.ptr->nr;
1712 		ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1713 		    -1 : state->anchor.ptr->nr;
1714 		ps->state.expire = pf_state_expires(state);
1715 		if (ps->state.expire > time_second)
1716 			ps->state.expire -= time_second;
1717 		else
1718 			ps->state.expire = 0;
1719 		break;
1720 	}
1721 
1722 	case DIOCGETSTATES: {
1723 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
1724 		struct pf_state		*state;
1725 		struct pf_state		*p, pstore;
1726 		struct pfi_kif		*kif;
1727 		u_int32_t		 nr = 0;
1728 		int			 space = ps->ps_len;
1729 
1730 		if (space == 0) {
1731 			TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1732 				nr += kif->pfik_states;
1733 			ps->ps_len = sizeof(struct pf_state) * nr;
1734 			break;
1735 		}
1736 
1737 		p = ps->ps_states;
1738 		TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1739 			RB_FOREACH(state, pf_state_tree_ext_gwy,
1740 			    &kif->pfik_ext_gwy) {
1741 				int	secs = time_second;
1742 
1743 				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1744 					break;
1745 
1746 				bcopy(state, &pstore, sizeof(pstore));
1747 				strlcpy(pstore.u.ifname, kif->pfik_name,
1748 				    sizeof(pstore.u.ifname));
1749 				pstore.rule.nr = state->rule.ptr->nr;
1750 				pstore.nat_rule.nr = (state->nat_rule.ptr ==
1751 				    NULL) ? -1 : state->nat_rule.ptr->nr;
1752 				pstore.anchor.nr = (state->anchor.ptr ==
1753 				    NULL) ? -1 : state->anchor.ptr->nr;
1754 				pstore.creation = secs - pstore.creation;
1755 				pstore.expire = pf_state_expires(state);
1756 				if (pstore.expire > secs)
1757 					pstore.expire -= secs;
1758 				else
1759 					pstore.expire = 0;
1760 				error = copyout(&pstore, p, sizeof(*p));
1761 				if (error)
1762 					goto fail;
1763 				p++;
1764 				nr++;
1765 			}
1766 		ps->ps_len = sizeof(struct pf_state) * nr;
1767 		break;
1768 	}
1769 
1770 	case DIOCGETSTATUS: {
1771 		struct pf_status *s = (struct pf_status *)addr;
1772 		bcopy(&pf_status, s, sizeof(struct pf_status));
1773 		pfi_fill_oldstatus(s);
1774 		break;
1775 	}
1776 
1777 	case DIOCSETSTATUSIF: {
1778 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
1779 
1780 		if (pi->ifname[0] == 0) {
1781 			bzero(pf_status.ifname, IFNAMSIZ);
1782 			break;
1783 		}
1784 		if (ifunit(pi->ifname) == NULL) {
1785 			error = EINVAL;
1786 			break;
1787 		}
1788 		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1789 		break;
1790 	}
1791 
1792 	case DIOCCLRSTATUS: {
1793 		bzero(pf_status.counters, sizeof(pf_status.counters));
1794 		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1795 		bzero(pf_status.scounters, sizeof(pf_status.scounters));
1796 		if (*pf_status.ifname)
1797 			pfi_clr_istats(pf_status.ifname, NULL,
1798 			    PFI_FLAG_INSTANCE);
1799 		break;
1800 	}
1801 
1802 	case DIOCNATLOOK: {
1803 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
1804 		struct pf_state		*state;
1805 		struct pf_state		 key;
1806 		int			 m = 0, direction = pnl->direction;
1807 
1808 		key.af = pnl->af;
1809 		key.proto = pnl->proto;
1810 
1811 		if (!pnl->proto ||
1812 		    PF_AZERO(&pnl->saddr, pnl->af) ||
1813 		    PF_AZERO(&pnl->daddr, pnl->af) ||
1814 		    !pnl->dport || !pnl->sport)
1815 			error = EINVAL;
1816 		else {
1817 			/*
1818 			 * Userland gives us the source and destination of the
1819 			 * connection; reverse the lookup so we ask what happens
1820 			 * to the return traffic, which lets us find it in the
1821 			 * state tree.
1822 			 */
1823 			if (direction == PF_IN) {
1824 				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1825 				key.ext.port = pnl->dport;
1826 				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1827 				key.gwy.port = pnl->sport;
1828 				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1829 			} else {
1830 				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1831 				key.lan.port = pnl->dport;
1832 				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1833 				key.ext.port = pnl->sport;
1834 				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1835 			}
1836 			if (m > 1)
1837 				error = E2BIG;	/* more than one state */
1838 			else if (state != NULL) {
1839 				if (direction == PF_IN) {
1840 					PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1841 					    state->af);
1842 					pnl->rsport = state->lan.port;
1843 					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1844 					    pnl->af);
1845 					pnl->rdport = pnl->dport;
1846 				} else {
1847 					PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1848 					    state->af);
1849 					pnl->rdport = state->gwy.port;
1850 					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1851 					    pnl->af);
1852 					pnl->rsport = pnl->sport;
1853 				}
1854 			} else
1855 				error = ENOENT;
1856 		}
1857 		break;
1858 	}
1859 
1860 	case DIOCSETTIMEOUT: {
1861 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1862 		int		 old;
1863 
1864 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1865 		    pt->seconds < 0) {
1866 			error = EINVAL;
1867 			goto fail;
1868 		}
1869 		old = pf_default_rule.timeout[pt->timeout];
1870 		pf_default_rule.timeout[pt->timeout] = pt->seconds;
1871 		pt->seconds = old;
1872 		break;
1873 	}
1874 
1875 	case DIOCGETTIMEOUT: {
1876 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1877 
1878 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1879 			error = EINVAL;
1880 			goto fail;
1881 		}
1882 		pt->seconds = pf_default_rule.timeout[pt->timeout];
1883 		break;
1884 	}
1885 
1886 	case DIOCGETLIMIT: {
1887 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1888 
1889 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1890 			error = EINVAL;
1891 			goto fail;
1892 		}
1893 		pl->limit = pf_pool_limits[pl->index].limit;
1894 		break;
1895 	}
1896 
1897 	case DIOCSETLIMIT: {
1898 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1899 		int			 old_limit;
1900 
1901 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1902 		    pf_pool_limits[pl->index].pp == NULL) {
1903 			error = EINVAL;
1904 			goto fail;
1905 		}
1906 #ifdef __OpenBSD__
1907 		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1908 		    pl->limit, NULL, 0) != 0) {
1909 			error = EBUSY;
1910 			goto fail;
1911 		}
1912 #else
1913 		pool_sethardlimit(pf_pool_limits[pl->index].pp,
1914 		    pl->limit, NULL, 0);
1915 #endif
1916 		old_limit = pf_pool_limits[pl->index].limit;
1917 		pf_pool_limits[pl->index].limit = pl->limit;
1918 		pl->limit = old_limit;
1919 		break;
1920 	}
1921 
1922 	case DIOCSETDEBUG: {
1923 		u_int32_t	*level = (u_int32_t *)addr;
1924 
1925 		pf_status.debug = *level;
1926 		break;
1927 	}
1928 
1929 	case DIOCCLRRULECTRS: {
1930 		struct pf_ruleset	*ruleset = &pf_main_ruleset;
1931 		struct pf_rule		*rule;
1932 
1933 		TAILQ_FOREACH(rule,
1934 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
1935 			rule->evaluations = rule->packets =
1936 			    rule->bytes = 0;
1937 		break;
1938 	}
1939 
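	/*
	 * In the altq lists an entry whose qname is empty represents an
	 * interface and its scheduling discipline; entries with a qname
	 * are individual queues.  DIOCSTARTALTQ and DIOCSTOPALTQ below
	 * therefore only act on the discipline entries.
	 */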
1940 #ifdef ALTQ
1941 	case DIOCSTARTALTQ: {
1942 		struct pf_altq		*altq;
1943 
1944 		/* enable all altq interfaces on active list */
1945 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1946 			if (altq->qname[0] == 0) {
1947 				error = pf_enable_altq(altq);
1948 				if (error != 0)
1949 					break;
1950 			}
1951 		}
1952 		if (error == 0)
1953 			pf_altq_running = 1;
1954 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1955 		break;
1956 	}
1957 
1958 	case DIOCSTOPALTQ: {
1959 		struct pf_altq		*altq;
1960 
1961 		/* disable all altq interfaces on active list */
1962 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1963 			if (altq->qname[0] == 0) {
1964 				error = pf_disable_altq(altq);
1965 				if (error != 0)
1966 					break;
1967 			}
1968 		}
1969 		if (error == 0)
1970 			pf_altq_running = 0;
1971 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1972 		break;
1973 	}
1974 
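	/*
	 * DIOCADDALTQ appends one altq definition to the inactive list.
	 * The caller must present the ticket handed out when the altq
	 * transaction was opened (see DIOCXBEGIN below); the new set only
	 * takes effect once that transaction is committed.
	 */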
1975 	case DIOCADDALTQ: {
1976 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1977 		struct pf_altq		*altq, *a;
1978 
1979 		if (pa->ticket != ticket_altqs_inactive) {
1980 			error = EBUSY;
1981 			break;
1982 		}
1983 		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
1984 		if (altq == NULL) {
1985 			error = ENOMEM;
1986 			break;
1987 		}
1988 		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1989 
1990 		/*
1991 		 * if this is for a queue, resolve its queue id and copy the
1992 		 * discipline pointer from the matching interface entry
1993 		 */
1994 		if (altq->qname[0] != 0) {
1995 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1996 				error = EBUSY;
1997 				pool_put(&pf_altq_pl, altq);
1998 				break;
1999 			}
2000 			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2001 				if (strncmp(a->ifname, altq->ifname,
2002 				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
2003 					altq->altq_disc = a->altq_disc;
2004 					break;
2005 				}
2006 			}
2007 		}
2008 
2009 		error = altq_add(altq);
2010 		if (error) {
2011 			pool_put(&pf_altq_pl, altq);
2012 			break;
2013 		}
2014 
2015 		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2016 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2017 		break;
2018 	}
2019 
2020 	case DIOCGETALTQS: {
2021 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2022 		struct pf_altq		*altq;
2023 
2024 		pa->nr = 0;
2025 		TAILQ_FOREACH(altq, pf_altqs_active, entries)
2026 			pa->nr++;
2027 		pa->ticket = ticket_altqs_active;
2028 		break;
2029 	}
2030 
2031 	case DIOCGETALTQ: {
2032 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2033 		struct pf_altq		*altq;
2034 		u_int32_t		 nr;
2035 
2036 		if (pa->ticket != ticket_altqs_active) {
2037 			error = EBUSY;
2038 			break;
2039 		}
2040 		nr = 0;
2041 		altq = TAILQ_FIRST(pf_altqs_active);
2042 		while ((altq != NULL) && (nr < pa->nr)) {
2043 			altq = TAILQ_NEXT(altq, entries);
2044 			nr++;
2045 		}
2046 		if (altq == NULL) {
2047 			error = EBUSY;
2048 			break;
2049 		}
2050 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2051 		break;
2052 	}
2053 
2054 	case DIOCCHANGEALTQ:
2055 		/* CHANGEALTQ not supported yet! */
2056 		error = ENODEV;
2057 		break;
2058 
2059 	case DIOCGETQSTATS: {
2060 		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
2061 		struct pf_altq		*altq;
2062 		u_int32_t		 nr;
2063 		int			 nbytes;
2064 
2065 		if (pq->ticket != ticket_altqs_active) {
2066 			error = EBUSY;
2067 			break;
2068 		}
2069 		nbytes = pq->nbytes;
2070 		nr = 0;
2071 		altq = TAILQ_FIRST(pf_altqs_active);
2072 		while ((altq != NULL) && (nr < pq->nr)) {
2073 			altq = TAILQ_NEXT(altq, entries);
2074 			nr++;
2075 		}
2076 		if (altq == NULL) {
2077 			error = EBUSY;
2078 			break;
2079 		}
2080 		error = altq_getqstats(altq, pq->buf, &nbytes);
2081 		if (error == 0) {
2082 			pq->scheduler = altq->scheduler;
2083 			pq->nbytes = nbytes;
2084 		}
2085 		break;
2086 	}
2087 #endif /* ALTQ */
2088 
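	/*
	 * Pool addresses are staged in two steps: DIOCBEGINADDRS empties
	 * the pf_pabuf staging buffer and issues a ticket, then one
	 * DIOCADDADDR call per address fills the buffer.  The buffer is
	 * consumed when a rule carrying the same pool ticket is added.
	 */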
2089 	case DIOCBEGINADDRS: {
2090 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2091 
2092 		pf_empty_pool(&pf_pabuf);
2093 		pp->ticket = ++ticket_pabuf;
2094 		break;
2095 	}
2096 
2097 	case DIOCADDADDR: {
2098 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2099 
2100 #ifndef INET
2101 		if (pp->af == AF_INET) {
2102 			error = EAFNOSUPPORT;
2103 			break;
2104 		}
2105 #endif /* INET */
2106 #ifndef INET6
2107 		if (pp->af == AF_INET6) {
2108 			error = EAFNOSUPPORT;
2109 			break;
2110 		}
2111 #endif /* INET6 */
2112 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2113 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2114 		    pp->addr.addr.type != PF_ADDR_TABLE) {
2115 			error = EINVAL;
2116 			break;
2117 		}
2118 		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2119 		if (pa == NULL) {
2120 			error = ENOMEM;
2121 			break;
2122 		}
2123 		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2124 		if (pa->ifname[0]) {
2125 			pa->kif = pfi_attach_rule(pa->ifname);
2126 			if (pa->kif == NULL) {
2127 				pool_put(&pf_pooladdr_pl, pa);
2128 				error = EINVAL;
2129 				break;
2130 			}
2131 		}
2132 		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2133 			pfi_dynaddr_remove(&pa->addr);
2134 			pfi_detach_rule(pa->kif);
2135 			pool_put(&pf_pooladdr_pl, pa);
2136 			error = EINVAL;
2137 			break;
2138 		}
2139 		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2140 		break;
2141 	}
2142 
2143 	case DIOCGETADDRS: {
2144 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2145 
2146 		pp->nr = 0;
2147 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2148 		    pp->r_num, 0, 1, 0);
2149 		if (pool == NULL) {
2150 			error = EBUSY;
2151 			break;
2152 		}
2153 		TAILQ_FOREACH(pa, &pool->list, entries)
2154 			pp->nr++;
2155 		break;
2156 	}
2157 
2158 	case DIOCGETADDR: {
2159 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2160 		u_int32_t		 nr = 0;
2161 
2162 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2163 		    pp->r_num, 0, 1, 1);
2164 		if (pool == NULL) {
2165 			error = EBUSY;
2166 			break;
2167 		}
2168 		pa = TAILQ_FIRST(&pool->list);
2169 		while ((pa != NULL) && (nr < pp->nr)) {
2170 			pa = TAILQ_NEXT(pa, entries);
2171 			nr++;
2172 		}
2173 		if (pa == NULL) {
2174 			error = EBUSY;
2175 			break;
2176 		}
2177 		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2178 		pfi_dynaddr_copyout(&pp->addr.addr);
2179 		pf_tbladdr_copyout(&pp->addr.addr);
2180 		break;
2181 	}
2182 
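	/*
	 * DIOCCHANGEADDR edits the address pool of an existing rule in
	 * place: depending on pca->action the supplied address is inserted
	 * at the head or tail, before or after the entry selected by
	 * pca->nr, or that entry is removed.
	 */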
2183 	case DIOCCHANGEADDR: {
2184 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
2185 		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
2186 		struct pf_ruleset	*ruleset;
2187 
2188 		if (pca->action < PF_CHANGE_ADD_HEAD ||
2189 		    pca->action > PF_CHANGE_REMOVE) {
2190 			error = EINVAL;
2191 			break;
2192 		}
2193 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2194 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2195 		    pca->addr.addr.type != PF_ADDR_TABLE) {
2196 			error = EINVAL;
2197 			break;
2198 		}
2199 
2200 		ruleset = pf_find_ruleset(pca->anchor);
2201 		if (ruleset == NULL) {
2202 			error = EBUSY;
2203 			break;
2204 		}
2205 		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2206 		    pca->r_num, pca->r_last, 1, 1);
2207 		if (pool == NULL) {
2208 			error = EBUSY;
2209 			break;
2210 		}
2211 		if (pca->action != PF_CHANGE_REMOVE) {
2212 			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2213 			if (newpa == NULL) {
2214 				error = ENOMEM;
2215 				break;
2216 			}
2217 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2218 #ifndef INET
2219 			if (pca->af == AF_INET) {
2220 				pool_put(&pf_pooladdr_pl, newpa);
2221 				error = EAFNOSUPPORT;
2222 				break;
2223 			}
2224 #endif /* INET */
2225 #ifndef INET6
2226 			if (pca->af == AF_INET6) {
2227 				pool_put(&pf_pooladdr_pl, newpa);
2228 				error = EAFNOSUPPORT;
2229 				break;
2230 			}
2231 #endif /* INET6 */
2232 			if (newpa->ifname[0]) {
2233 				newpa->kif = pfi_attach_rule(newpa->ifname);
2234 				if (newpa->kif == NULL) {
2235 					pool_put(&pf_pooladdr_pl, newpa);
2236 					error = EINVAL;
2237 					break;
2238 				}
2239 			} else
2240 				newpa->kif = NULL;
2241 			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2242 			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
2243 				pfi_dynaddr_remove(&newpa->addr);
2244 				pfi_detach_rule(newpa->kif);
2245 				pool_put(&pf_pooladdr_pl, newpa);
2246 				error = EINVAL;
2247 				break;
2248 			}
2249 		}
2250 
2251 		if (pca->action == PF_CHANGE_ADD_HEAD)
2252 			oldpa = TAILQ_FIRST(&pool->list);
2253 		else if (pca->action == PF_CHANGE_ADD_TAIL)
2254 			oldpa = TAILQ_LAST(&pool->list, pf_palist);
2255 		else {
2256 			int	i = 0;
2257 
2258 			oldpa = TAILQ_FIRST(&pool->list);
2259 			while ((oldpa != NULL) && (i < pca->nr)) {
2260 				oldpa = TAILQ_NEXT(oldpa, entries);
2261 				i++;
2262 			}
2263 			if (oldpa == NULL) {
				/* don't leak the pool address prepared above */
				if (newpa != NULL) {
					pfi_dynaddr_remove(&newpa->addr);
					pfi_detach_rule(newpa->kif);
					pool_put(&pf_pooladdr_pl, newpa);
				}
2264 				error = EINVAL;
2265 				break;
2266 			}
2267 		}
2268 
2269 		if (pca->action == PF_CHANGE_REMOVE) {
2270 			TAILQ_REMOVE(&pool->list, oldpa, entries);
2271 			pfi_dynaddr_remove(&oldpa->addr);
2272 			pf_tbladdr_remove(&oldpa->addr);
2273 			pfi_detach_rule(oldpa->kif);
2274 			pool_put(&pf_pooladdr_pl, oldpa);
2275 		} else {
2276 			if (oldpa == NULL)
2277 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2278 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
2279 			    pca->action == PF_CHANGE_ADD_BEFORE)
2280 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2281 			else
2282 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
2283 				    newpa, entries);
2284 		}
2285 
2286 		pool->cur = TAILQ_FIRST(&pool->list);
2287 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2288 		    pca->af);
2289 		break;
2290 	}
2291 
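	/*
	 * DIOCGETRULESETS and DIOCGETRULESET enumerate the child anchors
	 * of the anchor named by pr->path: the first returns how many
	 * children exist, the second is then called once per index to
	 * fetch each child's name.
	 */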
2292 	case DIOCGETRULESETS: {
2293 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2294 		struct pf_ruleset	*ruleset;
2295 		struct pf_anchor	*anchor;
2296 
2297 		pr->path[sizeof(pr->path) - 1] = 0;
2298 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2299 			error = EINVAL;
2300 			break;
2301 		}
2302 		pr->nr = 0;
2303 		if (ruleset->anchor == NULL) {
2304 			/* XXX kludge for pf_main_ruleset */
2305 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2306 				if (anchor->parent == NULL)
2307 					pr->nr++;
2308 		} else {
2309 			RB_FOREACH(anchor, pf_anchor_node,
2310 			    &ruleset->anchor->children)
2311 				pr->nr++;
2312 		}
2313 		break;
2314 	}
2315 
2316 	case DIOCGETRULESET: {
2317 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2318 		struct pf_ruleset	*ruleset;
2319 		struct pf_anchor	*anchor;
2320 		u_int32_t		 nr = 0;
2321 
2322 		pr->path[sizeof(pr->path) - 1] = 0;
2323 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2324 			error = EINVAL;
2325 			break;
2326 		}
2327 		pr->name[0] = 0;
2328 		if (ruleset->anchor == NULL) {
2329 			/* XXX kludge for pf_main_ruleset */
2330 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2331 				if (anchor->parent == NULL && nr++ == pr->nr) {
2332 					strlcpy(pr->name, anchor->name,
2333 					    sizeof(pr->name));
2334 					break;
2335 				}
2336 		} else {
2337 			RB_FOREACH(anchor, pf_anchor_node,
2338 			    &ruleset->anchor->children)
2339 				if (nr++ == pr->nr) {
2340 					strlcpy(pr->name, anchor->name,
2341 					    sizeof(pr->name));
2342 					break;
2343 				}
2344 		}
2345 		if (!pr->name[0])
2346 			error = EBUSY;
2347 		break;
2348 	}
2349 
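	/*
	 * The DIOCR* table ioctls below share one convention: userland
	 * passes an array of fixed-size records in pfrio_buffer, and
	 * pfrio_esize must match the kernel's record size so that a
	 * userland/kernel structure mismatch is caught up front.
	 */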
2350 	case DIOCRCLRTABLES: {
2351 		struct pfioc_table *io = (struct pfioc_table *)addr;
2352 
2353 		if (io->pfrio_esize != 0) {
2354 			error = ENODEV;
2355 			break;
2356 		}
2357 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2358 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2359 		break;
2360 	}
2361 
2362 	case DIOCRADDTABLES: {
2363 		struct pfioc_table *io = (struct pfioc_table *)addr;
2364 
2365 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2366 			error = ENODEV;
2367 			break;
2368 		}
2369 		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2370 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2371 		break;
2372 	}
2373 
2374 	case DIOCRDELTABLES: {
2375 		struct pfioc_table *io = (struct pfioc_table *)addr;
2376 
2377 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2378 			error = ENODEV;
2379 			break;
2380 		}
2381 		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2382 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2383 		break;
2384 	}
2385 
2386 	case DIOCRGETTABLES: {
2387 		struct pfioc_table *io = (struct pfioc_table *)addr;
2388 
2389 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2390 			error = ENODEV;
2391 			break;
2392 		}
2393 		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2394 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2395 		break;
2396 	}
2397 
2398 	case DIOCRGETTSTATS: {
2399 		struct pfioc_table *io = (struct pfioc_table *)addr;
2400 
2401 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2402 			error = ENODEV;
2403 			break;
2404 		}
2405 		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2406 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2407 		break;
2408 	}
2409 
2410 	case DIOCRCLRTSTATS: {
2411 		struct pfioc_table *io = (struct pfioc_table *)addr;
2412 
2413 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2414 			error = ENODEV;
2415 			break;
2416 		}
2417 		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2418 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2419 		break;
2420 	}
2421 
2422 	case DIOCRSETTFLAGS: {
2423 		struct pfioc_table *io = (struct pfioc_table *)addr;
2424 
2425 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2426 			error = ENODEV;
2427 			break;
2428 		}
2429 		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2430 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2431 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2432 		break;
2433 	}
2434 
2435 	case DIOCRCLRADDRS: {
2436 		struct pfioc_table *io = (struct pfioc_table *)addr;
2437 
2438 		if (io->pfrio_esize != 0) {
2439 			error = ENODEV;
2440 			break;
2441 		}
2442 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2443 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2444 		break;
2445 	}
2446 
2447 	case DIOCRADDADDRS: {
2448 		struct pfioc_table *io = (struct pfioc_table *)addr;
2449 
2450 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2451 			error = ENODEV;
2452 			break;
2453 		}
2454 		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2455 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2456 		    PFR_FLAG_USERIOCTL);
2457 		break;
2458 	}
2459 
2460 	case DIOCRDELADDRS: {
2461 		struct pfioc_table *io = (struct pfioc_table *)addr;
2462 
2463 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2464 			error = ENODEV;
2465 			break;
2466 		}
2467 		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2468 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2469 		    PFR_FLAG_USERIOCTL);
2470 		break;
2471 	}
2472 
2473 	case DIOCRSETADDRS: {
2474 		struct pfioc_table *io = (struct pfioc_table *)addr;
2475 
2476 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2477 			error = ENODEV;
2478 			break;
2479 		}
2480 		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2481 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2482 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2483 		    PFR_FLAG_USERIOCTL);
2484 		break;
2485 	}
2486 
2487 	case DIOCRGETADDRS: {
2488 		struct pfioc_table *io = (struct pfioc_table *)addr;
2489 
2490 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2491 			error = ENODEV;
2492 			break;
2493 		}
2494 		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2495 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2496 		break;
2497 	}
2498 
2499 	case DIOCRGETASTATS: {
2500 		struct pfioc_table *io = (struct pfioc_table *)addr;
2501 
2502 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2503 			error = ENODEV;
2504 			break;
2505 		}
2506 		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2507 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2508 		break;
2509 	}
2510 
2511 	case DIOCRCLRASTATS: {
2512 		struct pfioc_table *io = (struct pfioc_table *)addr;
2513 
2514 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2515 			error = ENODEV;
2516 			break;
2517 		}
2518 		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2519 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2520 		    PFR_FLAG_USERIOCTL);
2521 		break;
2522 	}
2523 
2524 	case DIOCRTSTADDRS: {
2525 		struct pfioc_table *io = (struct pfioc_table *)addr;
2526 
2527 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2528 			error = ENODEV;
2529 			break;
2530 		}
2531 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2532 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2533 		    PFR_FLAG_USERIOCTL);
2534 		break;
2535 	}
2536 
2537 	case DIOCRINADEFINE: {
2538 		struct pfioc_table *io = (struct pfioc_table *)addr;
2539 
2540 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2541 			error = ENODEV;
2542 			break;
2543 		}
2544 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2545 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2546 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2547 		break;
2548 	}
2549 
2550 	case DIOCOSFPADD: {
2551 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2552 		error = pf_osfp_add(io);
2553 		break;
2554 	}
2555 
2556 	case DIOCOSFPGET: {
2557 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2558 		error = pf_osfp_get(io);
2559 		break;
2560 	}
2561 
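	/*
	 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement transactional
	 * loading: userland describes every ruleset, table anchor and altq
	 * set it wants to replace in an array of pfioc_trans_e elements,
	 * obtains a ticket for each, loads the new data against those
	 * tickets and finally commits (or rolls back) all of them at once,
	 * so the configuration switches over atomically.
	 */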
2562 	case DIOCXBEGIN: {
2563 		struct pfioc_trans		*io = (struct pfioc_trans *)
2564 						    addr;
2565 		static struct pfioc_trans_e	 ioe;
2566 		static struct pfr_table		 table;
2567 		int				 i;
2568 
2569 		if (io->esize != sizeof(ioe)) {
2570 			error = ENODEV;
2571 			goto fail;
2572 		}
2573 		for (i = 0; i < io->size; i++) {
2574 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2575 				error = EFAULT;
2576 				goto fail;
2577 			}
2578 			switch (ioe.rs_num) {
2579 #ifdef ALTQ
2580 			case PF_RULESET_ALTQ:
2581 				if (ioe.anchor[0]) {
2582 					error = EINVAL;
2583 					goto fail;
2584 				}
2585 				if ((error = pf_begin_altq(&ioe.ticket)))
2586 					goto fail;
2587 				break;
2588 #endif /* ALTQ */
2589 			case PF_RULESET_TABLE:
2590 				bzero(&table, sizeof(table));
2591 				strlcpy(table.pfrt_anchor, ioe.anchor,
2592 				    sizeof(table.pfrt_anchor));
2593 				if ((error = pfr_ina_begin(&table,
2594 				    &ioe.ticket, NULL, 0)))
2595 					goto fail;
2596 				break;
2597 			default:
2598 				if ((error = pf_begin_rules(&ioe.ticket,
2599 				    ioe.rs_num, ioe.anchor)))
2600 					goto fail;
2601 				break;
2602 			}
2603 			if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2604 				error = EFAULT;
2605 				goto fail;
2606 			}
2607 		}
2608 		break;
2609 	}
2610 
2611 	case DIOCXROLLBACK: {
2612 		struct pfioc_trans		*io = (struct pfioc_trans *)
2613 						    addr;
2614 		static struct pfioc_trans_e	 ioe;
2615 		static struct pfr_table		 table;
2616 		int				 i;
2617 
2618 		if (io->esize != sizeof(ioe)) {
2619 			error = ENODEV;
2620 			goto fail;
2621 		}
2622 		for (i = 0; i < io->size; i++) {
2623 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2624 				error = EFAULT;
2625 				goto fail;
2626 			}
2627 			switch (ioe.rs_num) {
2628 #ifdef ALTQ
2629 			case PF_RULESET_ALTQ:
2630 				if (ioe.anchor[0]) {
2631 					error = EINVAL;
2632 					goto fail;
2633 				}
2634 				if ((error = pf_rollback_altq(ioe.ticket)))
2635 					goto fail; /* really bad */
2636 				break;
2637 #endif /* ALTQ */
2638 			case PF_RULESET_TABLE:
2639 				bzero(&table, sizeof(table));
2640 				strlcpy(table.pfrt_anchor, ioe.anchor,
2641 				    sizeof(table.pfrt_anchor));
2642 				if ((error = pfr_ina_rollback(&table,
2643 				    ioe.ticket, NULL, 0)))
2644 					goto fail; /* really bad */
2645 				break;
2646 			default:
2647 				if ((error = pf_rollback_rules(ioe.ticket,
2648 				    ioe.rs_num, ioe.anchor)))
2649 					goto fail; /* really bad */
2650 				break;
2651 			}
2652 		}
2653 		break;
2654 	}
2655 
2656 	case DIOCXCOMMIT: {
2657 		struct pfioc_trans		*io = (struct pfioc_trans *)
2658 						    addr;
2659 		static struct pfioc_trans_e	 ioe;
2660 		static struct pfr_table		 table;
2661 		struct pf_ruleset		*rs;
2662 		int				 i;
2663 
2664 		if (io->esize != sizeof(ioe)) {
2665 			error = ENODEV;
2666 			goto fail;
2667 		}
2668 		/* first pass: make sure everything will succeed */
2669 		for (i = 0; i < io->size; i++) {
2670 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2671 				error = EFAULT;
2672 				goto fail;
2673 			}
2674 			switch (ioe.rs_num) {
2675 #ifdef ALTQ
2676 			case PF_RULESET_ALTQ:
2677 				if (ioe.anchor[0]) {
2678 					error = EINVAL;
2679 					goto fail;
2680 				}
2681 				if (!altqs_inactive_open || ioe.ticket !=
2682 				    ticket_altqs_inactive) {
2683 					error = EBUSY;
2684 					goto fail;
2685 				}
2686 				break;
2687 #endif /* ALTQ */
2688 			case PF_RULESET_TABLE:
2689 				rs = pf_find_ruleset(ioe.anchor);
2690 				if (rs == NULL || !rs->topen || ioe.ticket !=
2691 				     rs->tticket) {
2692 					error = EBUSY;
2693 					goto fail;
2694 				}
2695 				break;
2696 			default:
2697 				if (ioe.rs_num < 0 || ioe.rs_num >=
2698 				    PF_RULESET_MAX) {
2699 					error = EINVAL;
2700 					goto fail;
2701 				}
2702 				rs = pf_find_ruleset(ioe.anchor);
2703 				if (rs == NULL ||
2704 				    !rs->rules[ioe.rs_num].inactive.open ||
2705 				    rs->rules[ioe.rs_num].inactive.ticket !=
2706 				    ioe.ticket) {
2707 					error = EBUSY;
2708 					goto fail;
2709 				}
2710 				break;
2711 			}
2712 		}
2713 		/* second pass: do the commit - no errors should happen here */
2714 		for (i = 0; i < io->size; i++) {
2715 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2716 				error = EFAULT;
2717 				goto fail;
2718 			}
2719 			switch (ioe.rs_num) {
2720 #ifdef ALTQ
2721 			case PF_RULESET_ALTQ:
2722 				if ((error = pf_commit_altq(ioe.ticket)))
2723 					goto fail; /* really bad */
2724 				break;
2725 #endif /* ALTQ */
2726 			case PF_RULESET_TABLE:
2727 				bzero(&table, sizeof(table));
2728 				strlcpy(table.pfrt_anchor, ioe.anchor,
2729 				    sizeof(table.pfrt_anchor));
2730 				if ((error = pfr_ina_commit(&table, ioe.ticket,
2731 				    NULL, NULL, 0)))
2732 					goto fail; /* really bad */
2733 				break;
2734 			default:
2735 				if ((error = pf_commit_rules(ioe.ticket,
2736 				    ioe.rs_num, ioe.anchor)))
2737 					goto fail; /* really bad */
2738 				break;
2739 			}
2740 		}
2741 		break;
2742 	}
2743 
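	/*
	 * DIOCGETSRCNODES follows the usual two-call protocol: a first
	 * call with psn_len set to zero only reports the space required,
	 * a second call with a large enough buffer copies the source
	 * tracking nodes out with creation/expire rewritten as relative
	 * times.
	 */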
2744 	case DIOCGETSRCNODES: {
2745 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2746 		struct pf_src_node	*n;
2747 		struct pf_src_node *p, pstore;
2748 		u_int32_t		 nr = 0;
2749 		int			 space = psn->psn_len;
2750 
2751 		if (space == 0) {
2752 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2753 				nr++;
2754 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2755 			break;
2756 		}
2757 
2758 		p = psn->psn_src_nodes;
2759 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2760 			int	secs = time_second;
2761 
2762 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2763 				break;
2764 
2765 			bcopy(n, &pstore, sizeof(pstore));
2766 			if (n->rule.ptr != NULL)
2767 				pstore.rule.nr = n->rule.ptr->nr;
2768 			pstore.creation = secs - pstore.creation;
2769 			if (pstore.expire > secs)
2770 				pstore.expire -= secs;
2771 			else
2772 				pstore.expire = 0;
2773 			error = copyout(&pstore, p, sizeof(*p));
2774 			if (error)
2775 				goto fail;
2776 			p++;
2777 			nr++;
2778 		}
2779 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2780 		break;
2781 	}
2782 
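	/*
	 * DIOCCLRSRCNODES detaches every state from its source node(s),
	 * marks all nodes as expired and lets the regular purge routine
	 * free them.
	 */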
2783 	case DIOCCLRSRCNODES: {
2784 		struct pf_src_node	*n;
2785 		struct pf_state		*state;
2786 
2787 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2788 			state->src_node = NULL;
2789 			state->nat_src_node = NULL;
2790 		}
2791 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2792 			n->expire = 1;
2793 			n->states = 0;
2794 		}
2795 		pf_purge_expired_src_nodes();
2796 		pf_status.src_nodes = 0;
2797 		break;
2798 	}
2799 
2800 	case DIOCSETHOSTID: {
2801 		u_int32_t	*hostid = (u_int32_t *)addr;
2802 
2803 		if (*hostid == 0) {
2804 			error = EINVAL;
2805 			goto fail;
2806 		}
2807 		pf_status.hostid = *hostid;
2808 		break;
2809 	}
2810 
2811 	case DIOCOSFPFLUSH:
2812 		pf_osfp_flush();
2813 		break;
2814 
2815 	case DIOCIGETIFACES: {
2816 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2817 
2818 		if (io->pfiio_esize != sizeof(struct pfi_if)) {
2819 			error = ENODEV;
2820 			break;
2821 		}
2822 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2823 		    &io->pfiio_size, io->pfiio_flags);
2824 		break;
2825 	}
2826 
2827 	case DIOCICLRISTATS: {
2828 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2829 
2830 		error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2831 		    io->pfiio_flags);
2832 		break;
2833 	}
2834 
2835 	default:
2836 		error = ENODEV;
2837 		break;
2838 	}
2839 fail:
2840 	splx(s);
2841 	return (error);
2842 }
2843 
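/*
 * NetBSD attaches pf to the network stack through pfil(9) hooks: one
 * wrapper per address family feeds packets into pf_test()/pf_test6(),
 * and two further hooks track interface attach/detach and address
 * changes so that pf's interface table stays in sync.
 */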
2844 #ifdef __NetBSD__
2845 int
2846 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2847 {
2848 	int error;
2849 
2850 	/*
2851 	 * ensure that the mbuf chain is writable beforehand,
2852 	 * as the pf code assumes it may modify the packet.
2853 	 * ip hdr (60 bytes) + tcp hdr (60 bytes) should be enough.
2854 	 * XXX inefficient
2855 	 */
2856 	error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2857 	if (error) {
2858 		m_freem(*mp);
2859 		*mp = NULL;
2860 		return error;
2861 	}
2862 
2863 	/*
2864 	 * If the packet is out-bound, we can't delay checksums
2865 	 * here.  For in-bound, the checksum has already been
2866 	 * validated.
2867 	 */
2868 	if (dir == PFIL_OUT) {
2869 		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2870 			in_delayed_cksum(*mp);
2871 			(*mp)->m_pkthdr.csum_flags &=
2872 			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2873 		}
2874 	}
2875 
2876 	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2877 	    != PF_PASS) {
2878 		m_freem(*mp);
2879 		*mp = NULL;
2880 		return EHOSTUNREACH;
2881 	}
2882 
2883 	/*
2884 	 * pf is not compatible with fast-forward; clear M_CANFASTFWD below.
2885 	 */
2886 
2887 	if (dir == PFIL_IN) {
2888 		(*mp)->m_flags &= ~M_CANFASTFWD;
2889 	}
2890 
2891 	return (0);
2892 }
2893 
2894 #ifdef INET6
2895 int
2896 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2897 {
2898 	int error;
2899 
2900 	/*
2901 	 * ensure that mbufs are writable beforehand
2902 	 * ensure that the mbuf chain is writable beforehand,
2903 	 * as the pf code assumes it may modify the packet.
2904 	 */
2905 	error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
2906 	if (error) {
2907 		m_freem(*mp);
2908 		*mp = NULL;
2909 		return error;
2910 	}
2911 
2912 	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2913 	    != PF_PASS) {
2914 		m_freem(*mp);
2915 		*mp = NULL;
2916 		return EHOSTUNREACH;
2917 	} else
2918 		return (0);
2919 }
2920 #endif
2921 
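/*
 * For PFIL_IFNET and PFIL_IFADDR hooks, pfil(9) passes the command word
 * in place of the mbuf pointer, hence the cast of mp to an integer in
 * the two wrappers below.
 */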
2922 int
2923 pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2924 {
2925 	u_long cmd = (u_long)mp;
2926 
2927 	switch (cmd) {
2928 	case PFIL_IFNET_ATTACH:
2929 		pfi_attach_ifnet(ifp);
2930 		break;
2931 	case PFIL_IFNET_DETACH:
2932 		pfi_detach_ifnet(ifp);
2933 		break;
2934 	}
2935 
2936 	return (0);
2937 }
2938 
2939 int
2940 pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2941 {
2942 	extern void pfi_kifaddr_update_if(struct ifnet *);
2943 
2944 	u_long cmd = (u_long)mp;
2945 
2946 	switch (cmd) {
2947 	case SIOCSIFADDR:
2948 	case SIOCAIFADDR:
2949 	case SIOCDIFADDR:
2950 #ifdef INET6
2951 	case SIOCAIFADDR_IN6:
2952 	case SIOCDIFADDR_IN6:
2953 #endif
2954 		pfi_kifaddr_update_if(ifp);
2955 		break;
2956 	default:
2957 		panic("unexpected ioctl");
2958 	}
2959 
2960 	return (0);
2961 }
2962 
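/*
 * pf_pfil_attach() registers the hooks in order and unwinds through its
 * error labels on failure, so a partial attachment never sticks around;
 * once all hooks are in place, every existing interface is registered
 * with pf.
 */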
2963 static int
2964 pf_pfil_attach(void)
2965 {
2966 	struct pfil_head *ph_inet;
2967 #ifdef INET6
2968 	struct pfil_head *ph_inet6;
2969 #endif
2970 	int error;
2971 	int i;
2972 
2973 	if (pf_pfil_attached)
2974 		return (0);
2975 
2976 	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
2977 	if (error)
2978 		goto bad1;
2979 	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
2980 	if (error)
2981 		goto bad2;
2982 
2983 	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
2984 	if (ph_inet)
2985 		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
2986 		    PFIL_IN|PFIL_OUT, ph_inet);
2987 	else
2988 		error = ENOENT;
2989 	if (error)
2990 		goto bad3;
2991 
2992 #ifdef INET6
2993 	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
2994 	if (ph_inet6)
2995 		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
2996 		    PFIL_IN|PFIL_OUT, ph_inet6);
2997 	else
2998 		error = ENOENT;
2999 	if (error)
3000 		goto bad4;
3001 #endif
3002 
3003 	for (i = 0; i < if_indexlim; i++)
3004 		if (ifindex2ifnet[i])
3005 			pfi_attach_ifnet(ifindex2ifnet[i]);
3006 	pf_pfil_attached = 1;
3007 
3008 	return (0);
3009 
3010 #ifdef INET6
3011 bad4:
3012 	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
3013 #endif
3014 bad3:
3015 	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3016 bad2:
3017 	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3018 bad1:
3019 	return (error);
3020 }
3021 
3022 static int
3023 pf_pfil_detach(void)
3024 {
3025 	struct pfil_head *ph_inet;
3026 #ifdef INET6
3027 	struct pfil_head *ph_inet6;
3028 #endif
3029 	int i;
3030 
3031 	if (pf_pfil_attached == 0)
3032 		return (0);
3033 
3034 	for (i = 0; i < if_indexlim; i++)
3035 		if (pfi_index2kif[i])
3036 			pfi_detach_ifnet(ifindex2ifnet[i]);
3037 
3038 	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3039 	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3040 
3041 	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3042 	if (ph_inet)
3043 		pfil_remove_hook((void *)pfil4_wrapper, NULL,
3044 		    PFIL_IN|PFIL_OUT, ph_inet);
3045 #ifdef INET6
3046 	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3047 	if (ph_inet6)
3048 		pfil_remove_hook((void *)pfil6_wrapper, NULL,
3049 		    PFIL_IN|PFIL_OUT, ph_inet6);
3050 #endif
3051 	pf_pfil_attached = 0;
3052 
3053 	return (0);
3054 }
3055 #endif
3056