xref: /netbsd-src/sys/dist/pf/net/pf_ioctl.c (revision fd5cb0acea84d278e04e640d37ca2398f894991f)
1 /*	$NetBSD: pf_ioctl.c,v 1.14 2005/01/01 09:13:14 yamt Exp $	*/
2 /*	$OpenBSD: pf_ioctl.c,v 1.130 2004/09/09 22:08:42 dhartmei Exp $ */
3 
4 /*
5  * Copyright (c) 2001 Daniel Hartmeier
6  * Copyright (c) 2002,2003 Henning Brauer
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  */
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_altq.h"
42 #include "opt_pfil_hooks.h"
43 #endif
44 
45 #ifdef __OpenBSD__
46 #include "pfsync.h"
47 #else
48 #define	NPFSYNC	0
49 #endif
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/filio.h>
55 #include <sys/fcntl.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/kernel.h>
59 #include <sys/time.h>
60 #ifdef __OpenBSD__
61 #include <sys/timeout.h>
62 #else
63 #include <sys/callout.h>
64 #endif
65 #include <sys/pool.h>
66 #include <sys/malloc.h>
67 #ifdef __NetBSD__
68 #include <sys/conf.h>
69 #endif
70 
71 #include <net/if.h>
72 #include <net/if_types.h>
73 #include <net/route.h>
74 
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/ip.h>
79 #include <netinet/ip_var.h>
80 #include <netinet/ip_icmp.h>
81 
82 #ifdef __OpenBSD__
83 #include <dev/rndvar.h>
84 #endif
85 #include <net/pfvar.h>
86 
87 #if NPFSYNC > 0
88 #include <net/if_pfsync.h>
89 #endif /* NPFSYNC > 0 */
90 
91 #ifdef INET6
92 #include <netinet/ip6.h>
93 #include <netinet/in_pcb.h>
94 #endif /* INET6 */
95 
96 #ifdef ALTQ
97 #include <altq/altq.h>
98 #endif
99 
100 void			 pfattach(int);
101 #ifdef _LKM
102 void			 pfdetach(void);
103 #endif
104 int			 pfopen(dev_t, int, int, struct proc *);
105 int			 pfclose(dev_t, int, int, struct proc *);
106 struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
107 			    u_int8_t, u_int8_t, u_int8_t);
108 int			 pf_get_ruleset_number(u_int8_t);
109 void			 pf_init_ruleset(struct pf_ruleset *);
110 int			 pf_anchor_setup(struct pf_rule *,
111 			    const struct pf_ruleset *, const char *);
112 int			 pf_anchor_copyout(const struct pf_ruleset *,
113 			    const struct pf_rule *, struct pfioc_rule *);
114 void			 pf_anchor_remove(struct pf_rule *);
115 
116 void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
117 void			 pf_empty_pool(struct pf_palist *);
118 int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
119 #ifdef ALTQ
120 int			 pf_begin_altq(u_int32_t *);
121 int			 pf_rollback_altq(u_int32_t);
122 int			 pf_commit_altq(u_int32_t);
123 int			 pf_enable_altq(struct pf_altq *);
124 int			 pf_disable_altq(struct pf_altq *);
125 #endif /* ALTQ */
126 int			 pf_begin_rules(u_int32_t *, int, const char *);
127 int			 pf_rollback_rules(u_int32_t, int, char *);
128 int			 pf_commit_rules(u_int32_t, int, char *);
129 
130 #ifdef __NetBSD__
131 const struct cdevsw pf_cdevsw = {
132 	pfopen, pfclose, noread, nowrite, pfioctl,
133 	nostop, notty, nopoll, nommap, nokqfilter,
134 };
135 
136 static int pf_pfil_attach(void);
137 static int pf_pfil_detach(void);
138 
139 static int pf_pfil_attached = 0;
140 #endif
141 
142 #ifdef __OpenBSD__
143 extern struct timeout	 pf_expire_to;
144 #else
145 extern struct callout	 pf_expire_to;
146 #endif
147 
148 struct pf_rule		 pf_default_rule;
149 #ifdef ALTQ
150 static int		 pf_altq_running;
151 #endif
152 
153 #define	TAGID_MAX	 50000
154 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
155 				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
156 
157 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
158 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
159 #endif
160 static u_int16_t	 tagname2tag(struct pf_tags *, char *);
161 static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
162 static void		 tag_unref(struct pf_tags *, u_int16_t);
163 
164 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
165 
166 #ifdef __NetBSD__
167 extern struct pfil_head if_pfil;
168 #endif
169 
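/*
 * pfattach() is the one-time attach routine: it sets up the memory
 * pools used by pf, initializes the table/interface/OS-fingerprint
 * subsystems, prepares the main ruleset and the active/inactive ALTQ
 * lists, installs the non-collectable default pass rule with its
 * default state timeouts, and arms the periodic expiry callout.
 */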
170 void
171 pfattach(int num)
172 {
173 	u_int32_t *timeout = pf_default_rule.timeout;
174 
175 	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
176 	    &pool_allocator_nointr);
177 	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
178 	    "pfsrctrpl", NULL);
179 	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
180 	    NULL);
181 	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
182 	    &pool_allocator_nointr);
183 	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
184 	    "pfpooladdrpl", &pool_allocator_nointr);
185 	pfr_initialize();
186 	pfi_initialize();
187 	pf_osfp_initialize();
188 
189 	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
190 	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
191 
192 	RB_INIT(&tree_src_tracking);
193 	RB_INIT(&pf_anchors);
194 	pf_init_ruleset(&pf_main_ruleset);
195 	TAILQ_INIT(&pf_altqs[0]);
196 	TAILQ_INIT(&pf_altqs[1]);
197 	TAILQ_INIT(&pf_pabuf);
198 	pf_altqs_active = &pf_altqs[0];
199 	pf_altqs_inactive = &pf_altqs[1];
200 	TAILQ_INIT(&state_updates);
201 
202 	/* default rule should never be garbage collected */
203 	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
204 	pf_default_rule.action = PF_PASS;
205 	pf_default_rule.nr = -1;
206 
207 	/* initialize default timeouts */
208 	timeout[PFTM_TCP_FIRST_PACKET] = 120;		/* First TCP packet */
209 	timeout[PFTM_TCP_OPENING] = 30;			/* No response yet */
210 	timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;	/* Established */
211 	timeout[PFTM_TCP_CLOSING] = 15 * 60;		/* Half closed */
212 	timeout[PFTM_TCP_FIN_WAIT] = 45;		/* Got both FINs */
213 	timeout[PFTM_TCP_CLOSED] = 90;			/* Got a RST */
214 	timeout[PFTM_UDP_FIRST_PACKET] = 60;		/* First UDP packet */
215 	timeout[PFTM_UDP_SINGLE] = 30;			/* Unidirectional */
216 	timeout[PFTM_UDP_MULTIPLE] = 60;		/* Bidirectional */
217 	timeout[PFTM_ICMP_FIRST_PACKET] = 20;		/* First ICMP packet */
218 	timeout[PFTM_ICMP_ERROR_REPLY] = 10;		/* Got error response */
219 	timeout[PFTM_OTHER_FIRST_PACKET] = 60;		/* First packet */
220 	timeout[PFTM_OTHER_SINGLE] = 30;		/* Unidirectional */
221 	timeout[PFTM_OTHER_MULTIPLE] = 60;		/* Bidirectional */
222 	timeout[PFTM_FRAG] = 30;			/* Fragment expire */
223 	timeout[PFTM_INTERVAL] = 10;			/* Expire interval */
224 	timeout[PFTM_SRC_NODE] = 0;			/* Source tracking */
225 	timeout[PFTM_TS_DIFF] = 30;			/* Allowed TS diff */
226 
227 #ifdef __OpenBSD__
228 	timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
229 	timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
230 #else
231 	callout_init(&pf_expire_to);
232 	callout_reset(&pf_expire_to, timeout[PFTM_INTERVAL] * hz,
233 	    pf_purge_timeout, &pf_expire_to);
234 #endif
235 
236 	pf_normalize_init();
237 	bzero(&pf_status, sizeof(pf_status));
238 	pf_status.debug = PF_DEBUG_URGENT;
239 
240 	/* XXX do our best to avoid a conflict */
241 	pf_status.hostid = arc4random();
242 }
243 
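/*
 * pfdetach() (LKM builds only) undoes pfattach() in reverse order:
 * unhook from pfil, stop the expiry callout, flush every ruleset and
 * the ALTQ list via empty begin/commit transactions, expire all
 * states and source nodes, clear the tables, tear down the remaining
 * anchors, and finally destroy the pools and the normalize, osfp,
 * table and interface subsystems.
 */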
244 #ifdef _LKM
245 void
246 pfdetach(void)
247 {
248 	struct pf_anchor	*anchor;
249 	struct pf_state		*state;
250 	struct pf_src_node	*node;
251 	struct pfioc_table	 pt;
252 	u_int32_t		 ticket;
253 	int			 i;
254 	char			 r = '\0';
255 
256 	(void)pf_pfil_detach();
257 
258 	callout_stop(&pf_expire_to);
259 	pf_status.running = 0;
260 
261 	/* clear the rulesets */
262 	for (i = 0; i < PF_RULESET_MAX; i++)
263 		if (pf_begin_rules(&ticket, i, &r) == 0)
264 			pf_commit_rules(ticket, i, &r);
265 #ifdef ALTQ
266 	if (pf_begin_altq(&ticket) == 0)
267 		pf_commit_altq(ticket);
268 #endif
269 
270 	/* clear states */
271 	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
272 		state->timeout = PFTM_PURGE;
273 #if NPFSYNC
274 		state->sync_flags = PFSTATE_NOSYNC;
275 #endif
276 	}
277 	pf_purge_expired_states();
278 #if NPFSYNC
279 	pfsync_clear_states(pf_status.hostid, NULL);
280 #endif
281 
282 	/* clear source nodes */
283 	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
284 		state->src_node = NULL;
285 		state->nat_src_node = NULL;
286 	}
287 	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
288 		node->expire = 1;
289 		node->states = 0;
290 	}
291 	pf_purge_expired_src_nodes();
292 
293 	/* clear tables */
294 	memset(&pt, '\0', sizeof(pt));
295 	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);
296 
297 	/* destroy anchors */
298 	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
299 		for (i = 0; i < PF_RULESET_MAX; i++)
300 			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
301 				pf_commit_rules(ticket, i, anchor->name);
302 	}
303 
304 	/* destroy main ruleset */
305 	pf_remove_if_empty_ruleset(&pf_main_ruleset);
306 
307 	/* destroy the pools */
308 	pool_destroy(&pf_pooladdr_pl);
309 	pool_destroy(&pf_altq_pl);
310 	pool_destroy(&pf_state_pl);
311 	pool_destroy(&pf_rule_pl);
312 	pool_destroy(&pf_src_tree_pl);
313 
314 	/* destroy subsystems */
315 	pf_normalize_destroy();
316 	pf_osfp_destroy();
317 	pfr_destroy();
318 	pfi_destroy();
319 }
320 #endif
321 
322 int
323 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
324 {
325 	if (minor(dev) >= 1)
326 		return (ENXIO);
327 	return (0);
328 }
329 
330 int
331 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
332 {
333 	if (minor(dev) >= 1)
334 		return (ENXIO);
335 	return (0);
336 }
337 
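/*
 * pf_get_pool() locates the address pool attached to one rule: it
 * looks up the ruleset named by "anchor", picks the active or
 * inactive rule queue for the ruleset type derived from
 * "rule_action", optionally verifies the caller's ticket, and then
 * returns the pool of either the last rule (r_last) or the rule
 * whose number matches "rule_number".
 */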
338 struct pf_pool *
339 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
340     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
341     u_int8_t check_ticket)
342 {
343 	struct pf_ruleset	*ruleset;
344 	struct pf_rule		*rule;
345 	int			 rs_num;
346 
347 	ruleset = pf_find_ruleset(anchor);
348 	if (ruleset == NULL)
349 		return (NULL);
350 	rs_num = pf_get_ruleset_number(rule_action);
351 	if (rs_num >= PF_RULESET_MAX)
352 		return (NULL);
353 	if (active) {
354 		if (check_ticket && ticket !=
355 		    ruleset->rules[rs_num].active.ticket)
356 			return (NULL);
357 		if (r_last)
358 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
359 			    pf_rulequeue);
360 		else
361 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
362 	} else {
363 		if (check_ticket && ticket !=
364 		    ruleset->rules[rs_num].inactive.ticket)
365 			return (NULL);
366 		if (r_last)
367 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
368 			    pf_rulequeue);
369 		else
370 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
371 	}
372 	if (!r_last) {
373 		while ((rule != NULL) && (rule->nr != rule_number))
374 			rule = TAILQ_NEXT(rule, entries);
375 	}
376 	if (rule == NULL)
377 		return (NULL);
378 
379 	return (&rule->rpool);
380 }
381 
382 int
383 pf_get_ruleset_number(u_int8_t action)
384 {
385 	switch (action) {
386 	case PF_SCRUB:
387 		return (PF_RULESET_SCRUB);
388 		break;
389 	case PF_PASS:
390 	case PF_DROP:
391 		return (PF_RULESET_FILTER);
392 		break;
393 	case PF_NAT:
394 	case PF_NONAT:
395 		return (PF_RULESET_NAT);
396 		break;
397 	case PF_BINAT:
398 	case PF_NOBINAT:
399 		return (PF_RULESET_BINAT);
400 		break;
401 	case PF_RDR:
402 	case PF_NORDR:
403 		return (PF_RULESET_RDR);
404 		break;
405 	default:
406 		return (PF_RULESET_MAX);
407 		break;
408 	}
409 }
410 
411 void
412 pf_init_ruleset(struct pf_ruleset *ruleset)
413 {
414 	int	i;
415 
416 	memset(ruleset, 0, sizeof(struct pf_ruleset));
417 	for (i = 0; i < PF_RULESET_MAX; i++) {
418 		TAILQ_INIT(&ruleset->rules[i].queues[0]);
419 		TAILQ_INIT(&ruleset->rules[i].queues[1]);
420 		ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
421 		ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
422 	}
423 }
424 
425 struct pf_anchor *
426 pf_find_anchor(const char *path)
427 {
428 	static struct pf_anchor	 key;
429 
430 	memset(&key, 0, sizeof(key));
431 	strlcpy(key.path, path, sizeof(key.path));
432 	return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
433 }
434 
435 struct pf_ruleset *
436 pf_find_ruleset(const char *path)
437 {
438 	struct pf_anchor	*anchor;
439 
440 	while (*path == '/')
441 		path++;
442 	if (!*path)
443 		return (&pf_main_ruleset);
444 	anchor = pf_find_anchor(path);
445 	if (anchor == NULL)
446 		return (NULL);
447 	else
448 		return (&anchor->ruleset);
449 }
450 
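/*
 * pf_find_or_create_ruleset() resolves an anchor path, creating any
 * missing components along the way.  It strips leading slashes and
 * tries a plain lookup first; on a miss it walks backwards from the
 * end of the path to find the deepest existing parent, then walks
 * forward again, allocating and linking one pf_anchor per remaining
 * path component into both the global anchor tree and the parent's
 * children tree.
 */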
451 struct pf_ruleset *
452 pf_find_or_create_ruleset(const char *path)
453 {
454 	static char		 p[MAXPATHLEN];
455 	char			*q, *r;
456 	struct pf_ruleset	*ruleset;
457 	struct pf_anchor	*anchor = NULL /* XXX gcc */,
458 				*dup, *parent = NULL;
459 
460 	while (*path == '/')
461 		path++;
462 	ruleset = pf_find_ruleset(path);
463 	if (ruleset != NULL)
464 		return (ruleset);
465 	strlcpy(p, path, sizeof(p));
466 	while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
467 		*q = 0;
468 		if ((ruleset = pf_find_ruleset(p)) != NULL) {
469 			parent = ruleset->anchor;
470 			break;
471 		}
472 	}
473 	if (q == NULL)
474 		q = p;
475 	else
476 		q++;
477 	strlcpy(p, path, sizeof(p));
478 	if (!*q)
479 		return (NULL);
480 	while ((r = strchr(q, '/')) != NULL || *q) {
481 		if (r != NULL)
482 			*r = 0;
483 		if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
484 		    (parent != NULL && strlen(parent->path) >=
485 		    MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
486 			return (NULL);
487 		anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
488 		    M_NOWAIT);
489 		if (anchor == NULL)
490 			return (NULL);
491 		memset(anchor, 0, sizeof(*anchor));
492 		RB_INIT(&anchor->children);
493 		strlcpy(anchor->name, q, sizeof(anchor->name));
494 		if (parent != NULL) {
495 			strlcpy(anchor->path, parent->path,
496 			    sizeof(anchor->path));
497 			strlcat(anchor->path, "/", sizeof(anchor->path));
498 		}
499 		strlcat(anchor->path, anchor->name, sizeof(anchor->path));
500 		if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
501 		    NULL) {
502 			printf("pf_find_or_create_ruleset: RB_INSERT1 "
503 			    "'%s' '%s' collides with '%s' '%s'\n",
504 			    anchor->path, anchor->name, dup->path, dup->name);
505 			free(anchor, M_TEMP);
506 			return (NULL);
507 		}
508 		if (parent != NULL) {
509 			anchor->parent = parent;
510 			if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
511 			    anchor)) != NULL) {
512 				printf("pf_find_or_create_ruleset: "
513 				    "RB_INSERT2 '%s' '%s' collides with "
514 				    "'%s' '%s'\n", anchor->path, anchor->name,
515 				    dup->path, dup->name);
516 				RB_REMOVE(pf_anchor_global, &pf_anchors,
517 				    anchor);
518 				free(anchor, M_TEMP);
519 				return (NULL);
520 			}
521 		}
522 		pf_init_ruleset(&anchor->ruleset);
523 		anchor->ruleset.anchor = anchor;
524 		parent = anchor;
525 		if (r != NULL)
526 			q = r + 1;
527 		else
528 			*q = 0;
529 	}
530 	return (&anchor->ruleset);
531 }
532 
533 void
534 pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
535 {
536 	struct pf_anchor	*parent;
537 	int			 i;
538 
539 	while (ruleset != NULL) {
540 		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
541 		    !RB_EMPTY(&ruleset->anchor->children) ||
542 		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
543 		    ruleset->topen)
544 			return;
545 		for (i = 0; i < PF_RULESET_MAX; ++i)
546 			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
547 			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
548 			    ruleset->rules[i].inactive.open)
549 				return;
550 		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
551 		if ((parent = ruleset->anchor->parent) != NULL)
552 			RB_REMOVE(pf_anchor_node, &parent->children,
553 			    ruleset->anchor);
554 		free(ruleset->anchor, M_TEMP);
555 		if (parent == NULL)
556 			return;
557 		ruleset = &parent->ruleset;
558 	}
559 }
560 
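/*
 * pf_anchor_setup() resolves the anchor call of a rule.  An absolute
 * call ("/foo") is taken as-is; a relative call is resolved against
 * the ruleset the rule sits in, with each leading "../" stripping one
 * component off that path and bumping anchor_relative so that
 * pf_anchor_copyout() can later reconstruct the original call.  A
 * trailing "/*" only sets anchor_wildcard.  For example, the call
 * "../foo/*" from a rule in anchor "a/b" resolves to path "a/foo"
 * with anchor_relative == 2 and anchor_wildcard == 1.
 */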
561 int
562 pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
563     const char *name)
564 {
565 	static char		*p, path[MAXPATHLEN];
566 	struct pf_ruleset	*ruleset;
567 
568 	r->anchor = NULL;
569 	r->anchor_relative = 0;
570 	r->anchor_wildcard = 0;
571 	if (!name[0])
572 		return (0);
573 	if (name[0] == '/')
574 		strlcpy(path, name + 1, sizeof(path));
575 	else {
576 		/* relative path */
577 		r->anchor_relative = 1;
578 		if (s->anchor == NULL || !s->anchor->path[0])
579 			path[0] = 0;
580 		else
581 			strlcpy(path, s->anchor->path, sizeof(path));
582 		while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
583 			if (!path[0]) {
584 				printf("pf_anchor_setup: .. beyond root\n");
585 				return (1);
586 			}
587 			if ((p = strrchr(path, '/')) != NULL)
588 				*p = 0;
589 			else
590 				path[0] = 0;
591 			r->anchor_relative++;
592 			name += 3;
593 		}
594 		if (path[0])
595 			strlcat(path, "/", sizeof(path));
596 		strlcat(path, name, sizeof(path));
597 	}
598 	if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
599 		r->anchor_wildcard = 1;
600 		*p = 0;
601 	}
602 	ruleset = pf_find_or_create_ruleset(path);
603 	if (ruleset == NULL || ruleset->anchor == NULL) {
604 		printf("pf_anchor_setup: ruleset\n");
605 		return (1);
606 	}
607 	r->anchor = ruleset->anchor;
608 	r->anchor->refcnt++;
609 	return (0);
610 }
611 
612 int
613 pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
614     struct pfioc_rule *pr)
615 {
616 	pr->anchor_call[0] = 0;
617 	if (r->anchor == NULL)
618 		return (0);
619 	if (!r->anchor_relative) {
620 		strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
621 		strlcat(pr->anchor_call, r->anchor->path,
622 		    sizeof(pr->anchor_call));
623 	} else {
624 		char a[MAXPATHLEN], b[MAXPATHLEN], *p;
625 		int i;
626 
627 		if (rs->anchor == NULL)
628 			a[0] = 0;
629 		else
630 			strlcpy(a, rs->anchor->path, sizeof(a));
631 		strlcpy(b, r->anchor->path, sizeof(b));
632 		for (i = 1; i < r->anchor_relative; ++i) {
633 			if ((p = strrchr(a, '/')) == NULL)
634 				p = a;
635 			*p = 0;
636 			strlcat(pr->anchor_call, "../",
637 			    sizeof(pr->anchor_call));
638 		}
639 		if (strncmp(a, b, strlen(a))) {
640 			printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
641 			return (1);
642 		}
643 		if (strlen(b) > strlen(a))
644 			strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
645 			    sizeof(pr->anchor_call));
646 	}
647 	if (r->anchor_wildcard)
648 		strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
649 		    sizeof(pr->anchor_call));
650 	return (0);
651 }
652 
653 void
654 pf_anchor_remove(struct pf_rule *r)
655 {
656 	if (r->anchor == NULL)
657 		return;
658 	if (r->anchor->refcnt <= 0) {
659 		printf("pf_anchor_remove: broken refcount");
660 		r->anchor = NULL;
661 		return;
662 	}
663 	if (!--r->anchor->refcnt)
664 		pf_remove_if_empty_ruleset(&r->anchor->ruleset);
665 	r->anchor = NULL;
666 }
667 
668 void
669 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
670 {
671 	struct pf_pooladdr	*mv_pool_pa;
672 
673 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
674 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
675 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
676 	}
677 }
678 
679 void
680 pf_empty_pool(struct pf_palist *poola)
681 {
682 	struct pf_pooladdr	*empty_pool_pa;
683 
684 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
685 		pfi_dynaddr_remove(&empty_pool_pa->addr);
686 		pf_tbladdr_remove(&empty_pool_pa->addr);
687 		pfi_detach_rule(empty_pool_pa->kif);
688 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
689 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
690 	}
691 }
692 
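/*
 * pf_rm_rule() unlinks a rule from its queue (when one is given) and
 * frees it, but only once nothing references it any more: while
 * states or source nodes still point at the rule, only the queue
 * unlink happens and the final release is deferred to a later call
 * with a NULL rulequeue, once the last reference has gone away.
 */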
693 void
694 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
695 {
696 	if (rulequeue != NULL) {
697 		if (rule->states <= 0) {
698 			/*
699 			 * XXX - we need to remove the table *before* detaching
700 			 * the rule to make sure the table code does not delete
701 			 * the anchor under our feet.
702 			 */
703 			pf_tbladdr_remove(&rule->src.addr);
704 			pf_tbladdr_remove(&rule->dst.addr);
705 		}
706 		TAILQ_REMOVE(rulequeue, rule, entries);
707 		rule->entries.tqe_prev = NULL;
708 		rule->nr = -1;
709 	}
710 
711 	if (rule->states > 0 || rule->src_nodes > 0 ||
712 	    rule->entries.tqe_prev != NULL)
713 		return;
714 	pf_tag_unref(rule->tag);
715 	pf_tag_unref(rule->match_tag);
716 #ifdef ALTQ
717 	if (rule->pqid != rule->qid)
718 		pf_qid_unref(rule->pqid);
719 	pf_qid_unref(rule->qid);
720 #endif
721 	pfi_dynaddr_remove(&rule->src.addr);
722 	pfi_dynaddr_remove(&rule->dst.addr);
723 	if (rulequeue == NULL) {
724 		pf_tbladdr_remove(&rule->src.addr);
725 		pf_tbladdr_remove(&rule->dst.addr);
726 	}
727 	pfi_detach_rule(rule->kif);
728 	pf_anchor_remove(rule);
729 	pf_empty_pool(&rule->rpool.list);
730 	pool_put(&pf_rule_pl, rule);
731 }
732 
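/*
 * Tag bookkeeping: pf_tags maps rule tag names to 16-bit ids and
 * pf_qids does the same for ALTQ queue names (which is why
 * PF_QNAME_SIZE must equal PF_TAG_NAME_SIZE above).  Entries are
 * reference counted; tagname2tag() reuses an existing entry when the
 * name is already known and otherwise takes the lowest free id, and
 * tag_unref() frees an entry when its last reference is dropped.
 */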
733 static	u_int16_t
734 tagname2tag(struct pf_tags *head, char *tagname)
735 {
736 	struct pf_tagname	*tag, *p = NULL;
737 	u_int16_t		 new_tagid = 1;
738 
739 	TAILQ_FOREACH(tag, head, entries)
740 		if (strcmp(tagname, tag->name) == 0) {
741 			tag->ref++;
742 			return (tag->tag);
743 		}
744 
745 	/*
746 	 * to avoid fragmentation, we do a linear search from the beginning
747 	 * and take the first free slot we find. if there is none or the list
748 	 * is empty, append a new entry at the end.
749 	 */
750 
751 	/* new entry */
752 	if (!TAILQ_EMPTY(head))
753 		for (p = TAILQ_FIRST(head); p != NULL &&
754 		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
755 			new_tagid = p->tag + 1;
756 
757 	if (new_tagid > TAGID_MAX)
758 		return (0);
759 
760 	/* allocate and fill new struct pf_tagname */
761 	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
762 	    M_TEMP, M_NOWAIT);
763 	if (tag == NULL)
764 		return (0);
765 	bzero(tag, sizeof(struct pf_tagname));
766 	strlcpy(tag->name, tagname, sizeof(tag->name));
767 	tag->tag = new_tagid;
768 	tag->ref++;
769 
770 	if (p != NULL)	/* insert new entry before p */
771 		TAILQ_INSERT_BEFORE(p, tag, entries);
772 	else	/* either list empty or no free slot in between */
773 		TAILQ_INSERT_TAIL(head, tag, entries);
774 
775 	return (tag->tag);
776 }
777 
778 static	void
779 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
780 {
781 	struct pf_tagname	*tag;
782 
783 	TAILQ_FOREACH(tag, head, entries)
784 		if (tag->tag == tagid) {
785 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
786 			return;
787 		}
788 }
789 
790 static	void
791 tag_unref(struct pf_tags *head, u_int16_t tag)
792 {
793 	struct pf_tagname	*p, *next;
794 
795 	if (tag == 0)
796 		return;
797 
798 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
799 		next = TAILQ_NEXT(p, entries);
800 		if (tag == p->tag) {
801 			if (--p->ref == 0) {
802 				TAILQ_REMOVE(head, p, entries);
803 				free(p, M_TEMP);
804 			}
805 			break;
806 		}
807 	}
808 }
809 
810 u_int16_t
811 pf_tagname2tag(char *tagname)
812 {
813 	return (tagname2tag(&pf_tags, tagname));
814 }
815 
816 void
817 pf_tag2tagname(u_int16_t tagid, char *p)
818 {
819 	return (tag2tagname(&pf_tags, tagid, p));
820 }
821 
822 void
823 pf_tag_unref(u_int16_t tag)
824 {
825 	return (tag_unref(&pf_tags, tag));
826 }
827 
828 #ifdef ALTQ
829 u_int32_t
830 pf_qname2qid(char *qname)
831 {
832 	return ((u_int32_t)tagname2tag(&pf_qids, qname));
833 }
834 
835 void
836 pf_qid2qname(u_int32_t qid, char *p)
837 {
838 	return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
839 }
840 
841 void
842 pf_qid_unref(u_int32_t qid)
843 {
844 	return (tag_unref(&pf_qids, (u_int16_t)qid));
845 }
846 
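/*
 * ALTQ configuration is loaded transactionally: pf_begin_altq()
 * empties the inactive queue list and hands out a new ticket,
 * pf_rollback_altq() throws the staged list away again, and
 * pf_commit_altq() swaps the inactive list in as the active one,
 * attaches (and, if ALTQ is running, enables) the new disciplines and
 * tears down the old ones.  The ticket guards against commits that do
 * not match the most recent begin.
 */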
847 int
848 pf_begin_altq(u_int32_t *ticket)
849 {
850 	struct pf_altq	*altq;
851 	int		 error = 0;
852 
853 	/* Purge the old altq list */
854 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
855 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
856 		if (altq->qname[0] == 0) {
857 			/* detach and destroy the discipline */
858 			error = altq_remove(altq);
859 		} else
860 			pf_qid_unref(altq->qid);
861 		pool_put(&pf_altq_pl, altq);
862 	}
863 	if (error)
864 		return (error);
865 	*ticket = ++ticket_altqs_inactive;
866 	altqs_inactive_open = 1;
867 	return (0);
868 }
869 
870 int
871 pf_rollback_altq(u_int32_t ticket)
872 {
873 	struct pf_altq	*altq;
874 	int		 error = 0;
875 
876 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
877 		return (0);
878 	/* Purge the old altq list */
879 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
880 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
881 		if (altq->qname[0] == 0) {
882 			/* detach and destroy the discipline */
883 			error = altq_remove(altq);
884 		} else
885 			pf_qid_unref(altq->qid);
886 		pool_put(&pf_altq_pl, altq);
887 	}
888 	altqs_inactive_open = 0;
889 	return (error);
890 }
891 
892 int
893 pf_commit_altq(u_int32_t ticket)
894 {
895 	struct pf_altqqueue	*old_altqs;
896 	struct pf_altq		*altq;
897 	int			 s, err, error = 0;
898 
899 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
900 		return (EBUSY);
901 
902 	/* swap altqs, keep the old. */
903 	s = splsoftnet();
904 	old_altqs = pf_altqs_active;
905 	pf_altqs_active = pf_altqs_inactive;
906 	pf_altqs_inactive = old_altqs;
907 	ticket_altqs_active = ticket_altqs_inactive;
908 
909 	/* Attach new disciplines */
910 	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
911 		if (altq->qname[0] == 0) {
912 			/* attach the discipline */
913 			error = altq_pfattach(altq);
914 			if (error == 0 && pf_altq_running)
915 				error = pf_enable_altq(altq);
916 			if (error != 0) {
917 				splx(s);
918 				return (error);
919 			}
920 		}
921 	}
922 
923 	/* Purge the old altq list */
924 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
925 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
926 		if (altq->qname[0] == 0) {
927 			/* detach and destroy the discipline */
928 			if (pf_altq_running)
929 				error = pf_disable_altq(altq);
930 			err = altq_pfdetach(altq);
931 			if (err != 0 && error == 0)
932 				error = err;
933 			err = altq_remove(altq);
934 			if (err != 0 && error == 0)
935 				error = err;
936 		} else
937 			pf_qid_unref(altq->qid);
938 		pool_put(&pf_altq_pl, altq);
939 	}
940 	splx(s);
941 
942 	altqs_inactive_open = 0;
943 	return (error);
944 }
945 
946 int
947 pf_enable_altq(struct pf_altq *altq)
948 {
949 	struct ifnet		*ifp;
950 	struct tb_profile	 tb;
951 	int			 s, error = 0;
952 
953 	if ((ifp = ifunit(altq->ifname)) == NULL)
954 		return (EINVAL);
955 
956 	if (ifp->if_snd.altq_type != ALTQT_NONE)
957 		error = altq_enable(&ifp->if_snd);
958 
959 	/* set tokenbucket regulator */
960 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
961 		tb.rate = altq->ifbandwidth;
962 		tb.depth = altq->tbrsize;
963 		s = splimp();
964 		error = tbr_set(&ifp->if_snd, &tb);
965 		splx(s);
966 	}
967 
968 	return (error);
969 }
970 
971 int
972 pf_disable_altq(struct pf_altq *altq)
973 {
974 	struct ifnet		*ifp;
975 	struct tb_profile	 tb;
976 	int			 s, error;
977 
978 	if ((ifp = ifunit(altq->ifname)) == NULL)
979 		return (EINVAL);
980 
981 	/*
982 	 * If the discipline is no longer referenced, it has been
983 	 * overridden by a new one; in that case, just return.
984 	 */
985 	if (altq->altq_disc != ifp->if_snd.altq_disc)
986 		return (0);
987 
988 	error = altq_disable(&ifp->if_snd);
989 
990 	if (error == 0) {
991 		/* clear tokenbucket regulator */
992 		tb.rate = 0;
993 		s = splimp();
994 		error = tbr_set(&ifp->if_snd, &tb);
995 		splx(s);
996 	}
997 
998 	return (error);
999 }
1000 #endif /* ALTQ */
1001 
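/*
 * Rulesets follow the same ticketed begin/rollback/commit pattern as
 * ALTQ above: rules added through DIOCADDRULE accumulate on the
 * inactive queue of a ruleset, and pf_commit_rules() atomically (at
 * splsoftnet) swaps the inactive and active queues, recomputes the
 * skip steps and purges the replaced rules.
 */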
1002 int
1003 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1004 {
1005 	struct pf_ruleset	*rs;
1006 	struct pf_rule		*rule;
1007 
1008 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1009 		return (EINVAL);
1010 	rs = pf_find_or_create_ruleset(anchor);
1011 	if (rs == NULL)
1012 		return (EINVAL);
1013 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1014 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1015 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1016 	rs->rules[rs_num].inactive.open = 1;
1017 	return (0);
1018 }
1019 
1020 int
1021 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1022 {
1023 	struct pf_ruleset	*rs;
1024 	struct pf_rule		*rule;
1025 
1026 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1027 		return (EINVAL);
1028 	rs = pf_find_ruleset(anchor);
1029 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1030 	    rs->rules[rs_num].inactive.ticket != ticket)
1031 		return (0);
1032 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1033 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1034 	rs->rules[rs_num].inactive.open = 0;
1035 	return (0);
1036 }
1037 
1038 int
1039 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1040 {
1041 	struct pf_ruleset	*rs;
1042 	struct pf_rule		*rule;
1043 	struct pf_rulequeue	*old_rules;
1044 	int			 s;
1045 
1046 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1047 		return (EINVAL);
1048 	rs = pf_find_ruleset(anchor);
1049 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1050 	    ticket != rs->rules[rs_num].inactive.ticket)
1051 		return (EBUSY);
1052 
1053 	/* Swap rules, keep the old. */
1054 	s = splsoftnet();
1055 	old_rules = rs->rules[rs_num].active.ptr;
1056 	rs->rules[rs_num].active.ptr =
1057 	    rs->rules[rs_num].inactive.ptr;
1058 	rs->rules[rs_num].inactive.ptr = old_rules;
1059 	rs->rules[rs_num].active.ticket =
1060 	    rs->rules[rs_num].inactive.ticket;
1061 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1062 
1063 	/* Purge the old rule list. */
1064 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1065 		pf_rm_rule(old_rules, rule);
1066 	rs->rules[rs_num].inactive.open = 0;
1067 	pf_remove_if_empty_ruleset(rs);
1068 	splx(s);
1069 	return (0);
1070 }
1071 
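/*
 * pfioctl() is the single entry point for all pf configuration: the
 * two switch statements up front restrict which commands are allowed
 * once securelevel has been raised and which are allowed on a
 * descriptor opened without FWRITE, and the large switch below
 * implements the commands themselves.
 *
 * As an illustrative sketch (not part of this driver), userland
 * drives these commands with ioctl(2) on the pf device node,
 * conventionally /dev/pf, roughly the way pfctl(8) enables pf
 * ("pfctl -e") and reads its status ("pfctl -s info"):
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <net/pfvar.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	pf_enable_example(void)
 *	{
 *		struct pf_status st;
 *		int fd;
 *
 *		if ((fd = open("/dev/pf", O_RDWR)) == -1)
 *			return (-1);
 *		if (ioctl(fd, DIOCSTART) == -1)
 *			perror("DIOCSTART");
 *		if (ioctl(fd, DIOCGETSTATUS, &st) == 0)
 *			printf("pf running: %u\n", st.running);
 *		close(fd);
 *		return (0);
 *	}
 */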
1072 int
1073 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1074 {
1075 	struct pf_pooladdr	*pa = NULL;
1076 	struct pf_pool		*pool = NULL;
1077 	int			 s;
1078 	int			 error = 0;
1079 
1080 	/* XXX keep in sync with switch() below */
1081 	if (securelevel > 1)
1082 		switch (cmd) {
1083 		case DIOCGETRULES:
1084 		case DIOCGETRULE:
1085 		case DIOCGETADDRS:
1086 		case DIOCGETADDR:
1087 		case DIOCGETSTATE:
1088 		case DIOCSETSTATUSIF:
1089 		case DIOCGETSTATUS:
1090 		case DIOCCLRSTATUS:
1091 		case DIOCNATLOOK:
1092 		case DIOCSETDEBUG:
1093 		case DIOCGETSTATES:
1094 		case DIOCGETTIMEOUT:
1095 		case DIOCCLRRULECTRS:
1096 		case DIOCGETLIMIT:
1097 		case DIOCGETALTQS:
1098 		case DIOCGETALTQ:
1099 		case DIOCGETQSTATS:
1100 		case DIOCGETRULESETS:
1101 		case DIOCGETRULESET:
1102 		case DIOCRGETTABLES:
1103 		case DIOCRGETTSTATS:
1104 		case DIOCRCLRTSTATS:
1105 		case DIOCRCLRADDRS:
1106 		case DIOCRADDADDRS:
1107 		case DIOCRDELADDRS:
1108 		case DIOCRSETADDRS:
1109 		case DIOCRGETADDRS:
1110 		case DIOCRGETASTATS:
1111 		case DIOCRCLRASTATS:
1112 		case DIOCRTSTADDRS:
1113 		case DIOCOSFPGET:
1114 		case DIOCGETSRCNODES:
1115 		case DIOCCLRSRCNODES:
1116 		case DIOCIGETIFACES:
1117 		case DIOCICLRISTATS:
1118 			break;
1119 		case DIOCRCLRTABLES:
1120 		case DIOCRADDTABLES:
1121 		case DIOCRDELTABLES:
1122 		case DIOCRSETTFLAGS:
1123 			if (((struct pfioc_table *)addr)->pfrio_flags &
1124 			    PFR_FLAG_DUMMY)
1125 				break; /* dummy operation ok */
1126 			return (EPERM);
1127 		default:
1128 			return (EPERM);
1129 		}
1130 
1131 	if (!(flags & FWRITE))
1132 		switch (cmd) {
1133 		case DIOCGETRULES:
1134 		case DIOCGETRULE:
1135 		case DIOCGETADDRS:
1136 		case DIOCGETADDR:
1137 		case DIOCGETSTATE:
1138 		case DIOCGETSTATUS:
1139 		case DIOCGETSTATES:
1140 		case DIOCGETTIMEOUT:
1141 		case DIOCGETLIMIT:
1142 		case DIOCGETALTQS:
1143 		case DIOCGETALTQ:
1144 		case DIOCGETQSTATS:
1145 		case DIOCGETRULESETS:
1146 		case DIOCGETRULESET:
1147 		case DIOCRGETTABLES:
1148 		case DIOCRGETTSTATS:
1149 		case DIOCRGETADDRS:
1150 		case DIOCRGETASTATS:
1151 		case DIOCRTSTADDRS:
1152 		case DIOCOSFPGET:
1153 		case DIOCGETSRCNODES:
1154 		case DIOCIGETIFACES:
1155 			break;
1156 		case DIOCRCLRTABLES:
1157 		case DIOCRADDTABLES:
1158 		case DIOCRDELTABLES:
1159 		case DIOCRCLRTSTATS:
1160 		case DIOCRCLRADDRS:
1161 		case DIOCRADDADDRS:
1162 		case DIOCRDELADDRS:
1163 		case DIOCRSETADDRS:
1164 		case DIOCRSETTFLAGS:
1165 			if (((struct pfioc_table *)addr)->pfrio_flags &
1166 			    PFR_FLAG_DUMMY)
1167 				break; /* dummy operation ok */
1168 			return (EACCES);
1169 		default:
1170 			return (EACCES);
1171 		}
1172 
1173 	switch (cmd) {
1174 
1175 	case DIOCSTART:
1176 		if (pf_status.running)
1177 			error = EEXIST;
1178 		else {
1179 #ifdef __NetBSD__
1180 			error = pf_pfil_attach();
1181 			if (error)
1182 				break;
1183 #endif
1184 			pf_status.running = 1;
1185 			pf_status.since = time_second;
1186 			if (pf_status.stateid == 0) {
1187 				pf_status.stateid = time_second;
1188 				pf_status.stateid = pf_status.stateid << 32;
1189 			}
1190 			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1191 		}
1192 		break;
1193 
1194 	case DIOCSTOP:
1195 		if (!pf_status.running)
1196 			error = ENOENT;
1197 		else {
1198 #ifdef __NetBSD__
1199 			error = pf_pfil_detach();
1200 			if (error)
1201 				break;
1202 #endif
1203 			pf_status.running = 0;
1204 			pf_status.since = time_second;
1205 			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1206 		}
1207 		break;
1208 
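	/*
	 * DIOCADDRULE appends one rule to the *inactive* queue of the
	 * ruleset named in pr->anchor.  pr->ticket must match the
	 * ticket handed out when that inactive queue was opened (see
	 * pf_begin_rules() above) and pr->pool_ticket must match the
	 * current address-pool buffer, so rules cannot land in a
	 * transaction that has since been restarted.
	 */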
1209 	case DIOCADDRULE: {
1210 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1211 		struct pf_ruleset	*ruleset;
1212 		struct pf_rule		*rule, *tail;
1213 		struct pf_pooladdr	*pa;
1214 		int			 rs_num;
1215 
1216 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1217 		ruleset = pf_find_ruleset(pr->anchor);
1218 		if (ruleset == NULL) {
1219 			error = EINVAL;
1220 			break;
1221 		}
1222 		rs_num = pf_get_ruleset_number(pr->rule.action);
1223 		if (rs_num >= PF_RULESET_MAX) {
1224 			error = EINVAL;
1225 			break;
1226 		}
1227 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1228 			error = EINVAL;
1229 			break;
1230 		}
1231 		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1232 			error = EBUSY;
1233 			break;
1234 		}
1235 		if (pr->pool_ticket != ticket_pabuf) {
1236 			error = EBUSY;
1237 			break;
1238 		}
1239 		rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1240 		if (rule == NULL) {
1241 			error = ENOMEM;
1242 			break;
1243 		}
1244 		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1245 		rule->anchor = NULL;
1246 		rule->kif = NULL;
1247 		TAILQ_INIT(&rule->rpool.list);
1248 		/* initialize refcounting */
1249 		rule->states = 0;
1250 		rule->src_nodes = 0;
1251 		rule->entries.tqe_prev = NULL;
1252 #ifndef INET
1253 		if (rule->af == AF_INET) {
1254 			pool_put(&pf_rule_pl, rule);
1255 			error = EAFNOSUPPORT;
1256 			break;
1257 		}
1258 #endif /* INET */
1259 #ifndef INET6
1260 		if (rule->af == AF_INET6) {
1261 			pool_put(&pf_rule_pl, rule);
1262 			error = EAFNOSUPPORT;
1263 			break;
1264 		}
1265 #endif /* INET6 */
1266 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1267 		    pf_rulequeue);
1268 		if (tail)
1269 			rule->nr = tail->nr + 1;
1270 		else
1271 			rule->nr = 0;
1272 		if (rule->ifname[0]) {
1273 			rule->kif = pfi_attach_rule(rule->ifname);
1274 			if (rule->kif == NULL) {
1275 				pool_put(&pf_rule_pl, rule);
1276 				error = EINVAL;
1277 				break;
1278 			}
1279 		}
1280 
1281 #ifdef ALTQ
1282 		/* set queue IDs */
1283 		if (rule->qname[0] != 0) {
1284 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1285 				error = EBUSY;
1286 			else if (rule->pqname[0] != 0) {
1287 				if ((rule->pqid =
1288 				    pf_qname2qid(rule->pqname)) == 0)
1289 					error = EBUSY;
1290 			} else
1291 				rule->pqid = rule->qid;
1292 		}
1293 #endif
1294 		if (rule->tagname[0])
1295 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1296 				error = EBUSY;
1297 		if (rule->match_tagname[0])
1298 			if ((rule->match_tag =
1299 			    pf_tagname2tag(rule->match_tagname)) == 0)
1300 				error = EBUSY;
1301 		if (rule->rt && !rule->direction)
1302 			error = EINVAL;
1303 		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1304 			error = EINVAL;
1305 		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1306 			error = EINVAL;
1307 		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1308 			error = EINVAL;
1309 		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1310 			error = EINVAL;
1311 		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1312 			error = EINVAL;
1313 		TAILQ_FOREACH(pa, &pf_pabuf, entries)
1314 			if (pf_tbladdr_setup(ruleset, &pa->addr))
1315 				error = EINVAL;
1316 
1317 		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1318 		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1319 		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1320 		    (rule->rt > PF_FASTROUTE)) &&
1321 		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
1322 			error = EINVAL;
1323 
1324 		if (error) {
1325 			pf_rm_rule(NULL, rule);
1326 			break;
1327 		}
1328 		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1329 		rule->evaluations = rule->packets = rule->bytes = 0;
1330 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1331 		    rule, entries);
1332 		break;
1333 	}
1334 
1335 	case DIOCGETRULES: {
1336 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1337 		struct pf_ruleset	*ruleset;
1338 		struct pf_rule		*tail;
1339 		int			 rs_num;
1340 
1341 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1342 		ruleset = pf_find_ruleset(pr->anchor);
1343 		if (ruleset == NULL) {
1344 			error = EINVAL;
1345 			break;
1346 		}
1347 		rs_num = pf_get_ruleset_number(pr->rule.action);
1348 		if (rs_num >= PF_RULESET_MAX) {
1349 			error = EINVAL;
1350 			break;
1351 		}
1352 		s = splsoftnet();
1353 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1354 		    pf_rulequeue);
1355 		if (tail)
1356 			pr->nr = tail->nr + 1;
1357 		else
1358 			pr->nr = 0;
1359 		pr->ticket = ruleset->rules[rs_num].active.ticket;
1360 		splx(s);
1361 		break;
1362 	}
1363 
1364 	case DIOCGETRULE: {
1365 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1366 		struct pf_ruleset	*ruleset;
1367 		struct pf_rule		*rule;
1368 		int			 rs_num, i;
1369 
1370 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1371 		ruleset = pf_find_ruleset(pr->anchor);
1372 		if (ruleset == NULL) {
1373 			error = EINVAL;
1374 			break;
1375 		}
1376 		rs_num = pf_get_ruleset_number(pr->rule.action);
1377 		if (rs_num >= PF_RULESET_MAX) {
1378 			error = EINVAL;
1379 			break;
1380 		}
1381 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1382 			error = EBUSY;
1383 			break;
1384 		}
1385 		s = splsoftnet();
1386 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1387 		while ((rule != NULL) && (rule->nr != pr->nr))
1388 			rule = TAILQ_NEXT(rule, entries);
1389 		if (rule == NULL) {
1390 			error = EBUSY;
1391 			splx(s);
1392 			break;
1393 		}
1394 		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1395 		if (pf_anchor_copyout(ruleset, rule, pr)) {
1396 			error = EBUSY;
1397 			splx(s);
1398 			break;
1399 		}
1400 		pfi_dynaddr_copyout(&pr->rule.src.addr);
1401 		pfi_dynaddr_copyout(&pr->rule.dst.addr);
1402 		pf_tbladdr_copyout(&pr->rule.src.addr);
1403 		pf_tbladdr_copyout(&pr->rule.dst.addr);
1404 		for (i = 0; i < PF_SKIP_COUNT; ++i)
1405 			if (rule->skip[i].ptr == NULL)
1406 				pr->rule.skip[i].nr = -1;
1407 			else
1408 				pr->rule.skip[i].nr =
1409 				    rule->skip[i].ptr->nr;
1410 		splx(s);
1411 		break;
1412 	}
1413 
1414 	case DIOCCHANGERULE: {
1415 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1416 		struct pf_ruleset	*ruleset;
1417 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1418 		u_int32_t		 nr = 0;
1419 		int			 rs_num;
1420 
1421 		if (!(pcr->action == PF_CHANGE_REMOVE ||
1422 		    pcr->action == PF_CHANGE_GET_TICKET) &&
1423 		    pcr->pool_ticket != ticket_pabuf) {
1424 			error = EBUSY;
1425 			break;
1426 		}
1427 
1428 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1429 		    pcr->action > PF_CHANGE_GET_TICKET) {
1430 			error = EINVAL;
1431 			break;
1432 		}
1433 		ruleset = pf_find_ruleset(pcr->anchor);
1434 		if (ruleset == NULL) {
1435 			error = EINVAL;
1436 			break;
1437 		}
1438 		rs_num = pf_get_ruleset_number(pcr->rule.action);
1439 		if (rs_num >= PF_RULESET_MAX) {
1440 			error = EINVAL;
1441 			break;
1442 		}
1443 
1444 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1445 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1446 			break;
1447 		} else {
1448 			if (pcr->ticket !=
1449 			    ruleset->rules[rs_num].active.ticket) {
1450 				error = EINVAL;
1451 				break;
1452 			}
1453 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1454 				error = EINVAL;
1455 				break;
1456 			}
1457 		}
1458 
1459 		if (pcr->action != PF_CHANGE_REMOVE) {
1460 			newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1461 			if (newrule == NULL) {
1462 				error = ENOMEM;
1463 				break;
1464 			}
1465 			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1466 			TAILQ_INIT(&newrule->rpool.list);
1467 			/* initialize refcounting */
1468 			newrule->states = 0;
1469 			newrule->entries.tqe_prev = NULL;
1470 #ifndef INET
1471 			if (newrule->af == AF_INET) {
1472 				pool_put(&pf_rule_pl, newrule);
1473 				error = EAFNOSUPPORT;
1474 				break;
1475 			}
1476 #endif /* INET */
1477 #ifndef INET6
1478 			if (newrule->af == AF_INET6) {
1479 				pool_put(&pf_rule_pl, newrule);
1480 				error = EAFNOSUPPORT;
1481 				break;
1482 			}
1483 #endif /* INET6 */
1484 			if (newrule->ifname[0]) {
1485 				newrule->kif = pfi_attach_rule(newrule->ifname);
1486 				if (newrule->kif == NULL) {
1487 					pool_put(&pf_rule_pl, newrule);
1488 					error = EINVAL;
1489 					break;
1490 				}
1491 			} else
1492 				newrule->kif = NULL;
1493 
1494 #ifdef ALTQ
1495 			/* set queue IDs */
1496 			if (newrule->qname[0] != 0) {
1497 				if ((newrule->qid =
1498 				    pf_qname2qid(newrule->qname)) == 0)
1499 					error = EBUSY;
1500 				else if (newrule->pqname[0] != 0) {
1501 					if ((newrule->pqid =
1502 					    pf_qname2qid(newrule->pqname)) == 0)
1503 						error = EBUSY;
1504 				} else
1505 					newrule->pqid = newrule->qid;
1506 			}
1507 #endif /* ALTQ */
1508 			if (newrule->tagname[0])
1509 				if ((newrule->tag =
1510 				    pf_tagname2tag(newrule->tagname)) == 0)
1511 					error = EBUSY;
1512 			if (newrule->match_tagname[0])
1513 				if ((newrule->match_tag = pf_tagname2tag(
1514 				    newrule->match_tagname)) == 0)
1515 					error = EBUSY;
1516 
1517 			if (newrule->rt && !newrule->direction)
1518 				error = EINVAL;
1519 			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1520 				error = EINVAL;
1521 			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1522 				error = EINVAL;
1523 			if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1524 				error = EINVAL;
1525 			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1526 				error = EINVAL;
1527 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1528 				error = EINVAL;
1529 
1530 			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1531 			if (((((newrule->action == PF_NAT) ||
1532 			    (newrule->action == PF_RDR) ||
1533 			    (newrule->action == PF_BINAT) ||
1534 			    (newrule->rt > PF_FASTROUTE)) &&
1535 			    !pcr->anchor[0])) &&
1536 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1537 				error = EINVAL;
1538 
1539 			if (error) {
1540 				pf_rm_rule(NULL, newrule);
1541 				break;
1542 			}
1543 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1544 			newrule->evaluations = newrule->packets = 0;
1545 			newrule->bytes = 0;
1546 		}
1547 		pf_empty_pool(&pf_pabuf);
1548 
1549 		s = splsoftnet();
1550 
1551 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1552 			oldrule = TAILQ_FIRST(
1553 			    ruleset->rules[rs_num].active.ptr);
1554 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1555 			oldrule = TAILQ_LAST(
1556 			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1557 		else {
1558 			oldrule = TAILQ_FIRST(
1559 			    ruleset->rules[rs_num].active.ptr);
1560 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1561 				oldrule = TAILQ_NEXT(oldrule, entries);
1562 			if (oldrule == NULL) {
1563 				if (newrule != NULL)
1564 					pf_rm_rule(NULL, newrule);
1565 				error = EINVAL;
1566 				splx(s);
1567 				break;
1568 			}
1569 		}
1570 
1571 		if (pcr->action == PF_CHANGE_REMOVE)
1572 			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1573 		else {
1574 			if (oldrule == NULL)
1575 				TAILQ_INSERT_TAIL(
1576 				    ruleset->rules[rs_num].active.ptr,
1577 				    newrule, entries);
1578 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1579 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1580 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1581 			else
1582 				TAILQ_INSERT_AFTER(
1583 				    ruleset->rules[rs_num].active.ptr,
1584 				    oldrule, newrule, entries);
1585 		}
1586 
1587 		nr = 0;
1588 		TAILQ_FOREACH(oldrule,
1589 		    ruleset->rules[rs_num].active.ptr, entries)
1590 			oldrule->nr = nr++;
1591 
1592 		ruleset->rules[rs_num].active.ticket++;
1593 
1594 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1595 		pf_remove_if_empty_ruleset(ruleset);
1596 
1597 		splx(s);
1598 		break;
1599 	}
1600 
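	/*
	 * State removal (DIOCCLRSTATES/DIOCKILLSTATES) does not free
	 * entries directly: matching states are marked with the
	 * PFTM_PURGE timeout, and the immediately following
	 * pf_purge_expired_states() call reaps them.
	 */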
1601 	case DIOCCLRSTATES: {
1602 		struct pf_state		*state;
1603 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1604 		int			 killed = 0;
1605 
1606 		s = splsoftnet();
1607 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1608 			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1609 			    state->u.s.kif->pfik_name)) {
1610 				state->timeout = PFTM_PURGE;
1611 #if NPFSYNC
1612 				/* don't send out individual delete messages */
1613 				state->sync_flags = PFSTATE_NOSYNC;
1614 #endif
1615 				killed++;
1616 			}
1617 		}
1618 		pf_purge_expired_states();
1619 		pf_status.states = 0;
1620 		psk->psk_af = killed;
1621 #if NPFSYNC
1622 		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1623 #endif
1624 		splx(s);
1625 		break;
1626 	}
1627 
1628 	case DIOCKILLSTATES: {
1629 		struct pf_state		*state;
1630 		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1631 		int			 killed = 0;
1632 
1633 		s = splsoftnet();
1634 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1635 			if ((!psk->psk_af || state->af == psk->psk_af)
1636 			    && (!psk->psk_proto || psk->psk_proto ==
1637 			    state->proto) &&
1638 			    PF_MATCHA(psk->psk_src.neg,
1639 			    &psk->psk_src.addr.v.a.addr,
1640 			    &psk->psk_src.addr.v.a.mask,
1641 			    &state->lan.addr, state->af) &&
1642 			    PF_MATCHA(psk->psk_dst.neg,
1643 			    &psk->psk_dst.addr.v.a.addr,
1644 			    &psk->psk_dst.addr.v.a.mask,
1645 			    &state->ext.addr, state->af) &&
1646 			    (psk->psk_src.port_op == 0 ||
1647 			    pf_match_port(psk->psk_src.port_op,
1648 			    psk->psk_src.port[0], psk->psk_src.port[1],
1649 			    state->lan.port)) &&
1650 			    (psk->psk_dst.port_op == 0 ||
1651 			    pf_match_port(psk->psk_dst.port_op,
1652 			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1653 			    state->ext.port)) &&
1654 			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1655 			    state->u.s.kif->pfik_name))) {
1656 				state->timeout = PFTM_PURGE;
1657 				killed++;
1658 			}
1659 		}
1660 		pf_purge_expired_states();
1661 		splx(s);
1662 		psk->psk_af = killed;
1663 		break;
1664 	}
1665 
1666 	case DIOCADDSTATE: {
1667 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1668 		struct pf_state		*state;
1669 		struct pfi_kif		*kif;
1670 
1671 		if (ps->state.timeout >= PFTM_MAX &&
1672 		    ps->state.timeout != PFTM_UNTIL_PACKET) {
1673 			error = EINVAL;
1674 			break;
1675 		}
1676 		state = pool_get(&pf_state_pl, PR_NOWAIT);
1677 		if (state == NULL) {
1678 			error = ENOMEM;
1679 			break;
1680 		}
1681 		s = splsoftnet();
1682 		kif = pfi_lookup_create(ps->state.u.ifname);
1683 		if (kif == NULL) {
1684 			pool_put(&pf_state_pl, state);
1685 			error = ENOENT;
1686 			splx(s);
1687 			break;
1688 		}
1689 		bcopy(&ps->state, state, sizeof(struct pf_state));
1690 		bzero(&state->u, sizeof(state->u));
1691 		state->rule.ptr = &pf_default_rule;
1692 		state->nat_rule.ptr = NULL;
1693 		state->anchor.ptr = NULL;
1694 		state->rt_kif = NULL;
1695 		state->creation = time_second;
1696 		state->pfsync_time = 0;
1697 		state->packets[0] = state->packets[1] = 0;
1698 		state->bytes[0] = state->bytes[1] = 0;
1699 
1700 		if (pf_insert_state(kif, state)) {
1701 			pfi_maybe_destroy(kif);
1702 			pool_put(&pf_state_pl, state);
1703 			error = ENOMEM;
1704 		}
1705 		splx(s);
1706 		break;
1707 	}
1708 
1709 	case DIOCGETSTATE: {
1710 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1711 		struct pf_state		*state;
1712 		u_int32_t		 nr;
1713 
1714 		nr = 0;
1715 		s = splsoftnet();
1716 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1717 			if (nr >= ps->nr)
1718 				break;
1719 			nr++;
1720 		}
1721 		if (state == NULL) {
1722 			error = EBUSY;
1723 			splx(s);
1724 			break;
1725 		}
1726 		bcopy(state, &ps->state, sizeof(struct pf_state));
1727 		ps->state.rule.nr = state->rule.ptr->nr;
1728 		ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1729 		    -1 : state->nat_rule.ptr->nr;
1730 		ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1731 		    -1 : state->anchor.ptr->nr;
1732 		splx(s);
1733 		ps->state.expire = pf_state_expires(state);
1734 		if (ps->state.expire > time_second)
1735 			ps->state.expire -= time_second;
1736 		else
1737 			ps->state.expire = 0;
1738 		break;
1739 	}
1740 
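	/*
	 * DIOCGETSTATES is a two-step interface: a call with
	 * ps_len == 0 only reports the buffer size needed for the
	 * current state table, and a second call with a buffer copies
	 * out as many states as fit, updating ps_len to the amount
	 * actually used.
	 */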
1741 	case DIOCGETSTATES: {
1742 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
1743 		struct pf_state		*state;
1744 		struct pf_state		*p, pstore;
1745 		struct pfi_kif		*kif;
1746 		u_int32_t		 nr = 0;
1747 		int			 space = ps->ps_len;
1748 
1749 		if (space == 0) {
1750 			s = splsoftnet();
1751 			TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1752 				nr += kif->pfik_states;
1753 			splx(s);
1754 			ps->ps_len = sizeof(struct pf_state) * nr;
1755 			return (0);
1756 		}
1757 
1758 		s = splsoftnet();
1759 		p = ps->ps_states;
1760 		TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1761 			RB_FOREACH(state, pf_state_tree_ext_gwy,
1762 			    &kif->pfik_ext_gwy) {
1763 				int	secs = time_second;
1764 
1765 				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1766 					break;
1767 
1768 				bcopy(state, &pstore, sizeof(pstore));
1769 				strlcpy(pstore.u.ifname, kif->pfik_name,
1770 				    sizeof(pstore.u.ifname));
1771 				pstore.rule.nr = state->rule.ptr->nr;
1772 				pstore.nat_rule.nr = (state->nat_rule.ptr ==
1773 				    NULL) ? -1 : state->nat_rule.ptr->nr;
1774 				pstore.anchor.nr = (state->anchor.ptr ==
1775 				    NULL) ? -1 : state->anchor.ptr->nr;
1776 				pstore.creation = secs - pstore.creation;
1777 				pstore.expire = pf_state_expires(state);
1778 				if (pstore.expire > secs)
1779 					pstore.expire -= secs;
1780 				else
1781 					pstore.expire = 0;
1782 				error = copyout(&pstore, p, sizeof(*p));
1783 				if (error) {
1784 					splx(s);
1785 					goto fail;
1786 				}
1787 				p++;
1788 				nr++;
1789 			}
1790 		ps->ps_len = sizeof(struct pf_state) * nr;
1791 		splx(s);
1792 		break;
1793 	}
1794 
1795 	case DIOCGETSTATUS: {
1796 		struct pf_status *s = (struct pf_status *)addr;
1797 		bcopy(&pf_status, s, sizeof(struct pf_status));
1798 		pfi_fill_oldstatus(s);
1799 		break;
1800 	}
1801 
1802 	case DIOCSETSTATUSIF: {
1803 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
1804 
1805 		if (pi->ifname[0] == 0) {
1806 			bzero(pf_status.ifname, IFNAMSIZ);
1807 			break;
1808 		}
1809 		if (ifunit(pi->ifname) == NULL) {
1810 			error = EINVAL;
1811 			break;
1812 		}
1813 		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1814 		break;
1815 	}
1816 
1817 	case DIOCCLRSTATUS: {
1818 		bzero(pf_status.counters, sizeof(pf_status.counters));
1819 		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1820 		bzero(pf_status.scounters, sizeof(pf_status.scounters));
1821 		if (*pf_status.ifname)
1822 			pfi_clr_istats(pf_status.ifname, NULL,
1823 			    PFI_FLAG_INSTANCE);
1824 		break;
1825 	}
1826 
1827 	case DIOCNATLOOK: {
1828 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
1829 		struct pf_state		*state;
1830 		struct pf_state		 key;
1831 		int			 m = 0, direction = pnl->direction;
1832 
1833 		key.af = pnl->af;
1834 		key.proto = pnl->proto;
1835 
1836 		if (!pnl->proto ||
1837 		    PF_AZERO(&pnl->saddr, pnl->af) ||
1838 		    PF_AZERO(&pnl->daddr, pnl->af) ||
1839 		    !pnl->dport || !pnl->sport)
1840 			error = EINVAL;
1841 		else {
1842 			s = splsoftnet();
1843 
1844 			/*
1845 			 * userland gives us source and dest of connection,
1846 			 * reverse the lookup so we ask for what happens with
1847 			 * the return traffic, enabling us to find it in the
1848 			 * state tree.
1849 			 */
1850 			if (direction == PF_IN) {
1851 				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1852 				key.ext.port = pnl->dport;
1853 				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1854 				key.gwy.port = pnl->sport;
1855 				state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1856 			} else {
1857 				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1858 				key.lan.port = pnl->dport;
1859 				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1860 				key.ext.port = pnl->sport;
1861 				state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1862 			}
1863 			if (m > 1)
1864 				error = E2BIG;	/* more than one state */
1865 			else if (state != NULL) {
1866 				if (direction == PF_IN) {
1867 					PF_ACPY(&pnl->rsaddr, &state->lan.addr,
1868 					    state->af);
1869 					pnl->rsport = state->lan.port;
1870 					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1871 					    pnl->af);
1872 					pnl->rdport = pnl->dport;
1873 				} else {
1874 					PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
1875 					    state->af);
1876 					pnl->rdport = state->gwy.port;
1877 					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1878 					    pnl->af);
1879 					pnl->rsport = pnl->sport;
1880 				}
1881 			} else
1882 				error = ENOENT;
1883 			splx(s);
1884 		}
1885 		break;
1886 	}
1887 
1888 	case DIOCSETTIMEOUT: {
1889 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1890 		int		 old;
1891 
1892 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1893 		    pt->seconds < 0) {
1894 			error = EINVAL;
1895 			goto fail;
1896 		}
1897 		old = pf_default_rule.timeout[pt->timeout];
1898 		pf_default_rule.timeout[pt->timeout] = pt->seconds;
1899 		pt->seconds = old;
1900 		break;
1901 	}
1902 
1903 	case DIOCGETTIMEOUT: {
1904 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1905 
1906 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1907 			error = EINVAL;
1908 			goto fail;
1909 		}
1910 		pt->seconds = pf_default_rule.timeout[pt->timeout];
1911 		break;
1912 	}
1913 
1914 	case DIOCGETLIMIT: {
1915 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1916 
1917 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1918 			error = EINVAL;
1919 			goto fail;
1920 		}
1921 		pl->limit = pf_pool_limits[pl->index].limit;
1922 		break;
1923 	}
1924 
1925 	case DIOCSETLIMIT: {
1926 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1927 		int			 old_limit;
1928 
1929 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1930 		    pf_pool_limits[pl->index].pp == NULL) {
1931 			error = EINVAL;
1932 			goto fail;
1933 		}
1934 #ifdef __OpenBSD__
1935 		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1936 		    pl->limit, NULL, 0) != 0) {
1937 			error = EBUSY;
1938 			goto fail;
1939 		}
1940 #else
1941 		pool_sethardlimit(pf_pool_limits[pl->index].pp,
1942 		    pl->limit, NULL, 0);
1943 #endif
1944 		old_limit = pf_pool_limits[pl->index].limit;
1945 		pf_pool_limits[pl->index].limit = pl->limit;
1946 		pl->limit = old_limit;
1947 		break;
1948 	}
1949 
1950 	case DIOCSETDEBUG: {
1951 		u_int32_t	*level = (u_int32_t *)addr;
1952 
1953 		pf_status.debug = *level;
1954 		break;
1955 	}
1956 
1957 	case DIOCCLRRULECTRS: {
1958 		struct pf_ruleset	*ruleset = &pf_main_ruleset;
1959 		struct pf_rule		*rule;
1960 
1961 		s = splsoftnet();
1962 		TAILQ_FOREACH(rule,
1963 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
1964 			rule->evaluations = rule->packets =
1965 			    rule->bytes = 0;
1966 		splx(s);
1967 		break;
1968 	}
1969 
1970 #ifdef ALTQ
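	/*
	 * DIOCSTARTALTQ/DIOCSTOPALTQ enable or disable queueing on every
	 * interface-level entry (qname[0] == 0) on the active ALTQ list and
	 * record the result in pf_altq_running.
	 */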
1971 	case DIOCSTARTALTQ: {
1972 		struct pf_altq		*altq;
1973 
1974 		/* enable all altq interfaces on active list */
1975 		s = splsoftnet();
1976 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1977 			if (altq->qname[0] == 0) {
1978 				error = pf_enable_altq(altq);
1979 				if (error != 0)
1980 					break;
1981 			}
1982 		}
1983 		if (error == 0)
1984 			pf_altq_running = 1;
1985 		splx(s);
1986 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1987 		break;
1988 	}
1989 
1990 	case DIOCSTOPALTQ: {
1991 		struct pf_altq		*altq;
1992 
1993 		/* disable all altq interfaces on active list */
1994 		s = splsoftnet();
1995 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1996 			if (altq->qname[0] == 0) {
1997 				error = pf_disable_altq(altq);
1998 				if (error != 0)
1999 					break;
2000 			}
2001 		}
2002 		if (error == 0)
2003 			pf_altq_running = 0;
2004 		splx(s);
2005 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2006 		break;
2007 	}
2008 
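	/*
	 * DIOCADDALTQ appends a queue definition to the inactive ALTQ list.
	 * The caller's ticket must match ticket_altqs_inactive, i.e. an ALTQ
	 * transaction must be open (see pf_begin_altq() in DIOCXBEGIN below).
	 */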
2009 	case DIOCADDALTQ: {
2010 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2011 		struct pf_altq		*altq, *a;
2012 
2013 		if (pa->ticket != ticket_altqs_inactive) {
2014 			error = EBUSY;
2015 			break;
2016 		}
2017 		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2018 		if (altq == NULL) {
2019 			error = ENOMEM;
2020 			break;
2021 		}
2022 		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2023 
2024 		/*
2025 		 * If this entry is for a queue (qname set), resolve its
2026 		 * queue id and copy the discipline pointer from the
2027 		 * matching interface-level entry.
2028 		 */
2028 		if (altq->qname[0] != 0) {
2029 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2030 				error = EBUSY;
2031 				pool_put(&pf_altq_pl, altq);
2032 				break;
2033 			}
2034 			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2035 				if (strncmp(a->ifname, altq->ifname,
2036 				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
2037 					altq->altq_disc = a->altq_disc;
2038 					break;
2039 				}
2040 			}
2041 		}
2042 
2043 		error = altq_add(altq);
2044 		if (error) {
2045 			pool_put(&pf_altq_pl, altq);
2046 			break;
2047 		}
2048 
2049 		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2050 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2051 		break;
2052 	}
2053 
2054 	case DIOCGETALTQS: {
2055 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2056 		struct pf_altq		*altq;
2057 
2058 		pa->nr = 0;
2059 		s = splsoftnet();
2060 		TAILQ_FOREACH(altq, pf_altqs_active, entries)
2061 			pa->nr++;
2062 		pa->ticket = ticket_altqs_active;
2063 		splx(s);
2064 		break;
2065 	}
2066 
2067 	case DIOCGETALTQ: {
2068 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
2069 		struct pf_altq		*altq;
2070 		u_int32_t		 nr;
2071 
2072 		if (pa->ticket != ticket_altqs_active) {
2073 			error = EBUSY;
2074 			break;
2075 		}
2076 		nr = 0;
2077 		s = splsoftnet();
2078 		altq = TAILQ_FIRST(pf_altqs_active);
2079 		while ((altq != NULL) && (nr < pa->nr)) {
2080 			altq = TAILQ_NEXT(altq, entries);
2081 			nr++;
2082 		}
2083 		if (altq == NULL) {
2084 			error = EBUSY;
2085 			splx(s);
2086 			break;
2087 		}
2088 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2089 		splx(s);
2090 		break;
2091 	}
2092 
2093 	case DIOCCHANGEALTQ:
2094 		/* CHANGEALTQ not supported yet! */
2095 		error = ENODEV;
2096 		break;
2097 
2098 	case DIOCGETQSTATS: {
2099 		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
2100 		struct pf_altq		*altq;
2101 		u_int32_t		 nr;
2102 		int			 nbytes;
2103 
2104 		if (pq->ticket != ticket_altqs_active) {
2105 			error = EBUSY;
2106 			break;
2107 		}
2108 		nbytes = pq->nbytes;
2109 		nr = 0;
2110 		s = splsoftnet();
2111 		altq = TAILQ_FIRST(pf_altqs_active);
2112 		while ((altq != NULL) && (nr < pq->nr)) {
2113 			altq = TAILQ_NEXT(altq, entries);
2114 			nr++;
2115 		}
2116 		if (altq == NULL) {
2117 			error = EBUSY;
2118 			splx(s);
2119 			break;
2120 		}
2121 		error = altq_getqstats(altq, pq->buf, &nbytes);
2122 		splx(s);
2123 		if (error == 0) {
2124 			pq->scheduler = altq->scheduler;
2125 			pq->nbytes = nbytes;
2126 		}
2127 		break;
2128 	}
2129 #endif /* ALTQ */
2130 
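	/*
	 * Pool address handling: DIOCBEGINADDRS flushes the temporary buffer
	 * pf_pabuf and hands out a new ticket; subsequent DIOCADDADDR calls
	 * append validated pool addresses to that buffer.
	 */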
2131 	case DIOCBEGINADDRS: {
2132 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2133 
2134 		pf_empty_pool(&pf_pabuf);
2135 		pp->ticket = ++ticket_pabuf;
2136 		break;
2137 	}
2138 
2139 	case DIOCADDADDR: {
2140 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2141 
2142 #ifndef INET
2143 		if (pp->af == AF_INET) {
2144 			error = EAFNOSUPPORT;
2145 			break;
2146 		}
2147 #endif /* INET */
2148 #ifndef INET6
2149 		if (pp->af == AF_INET6) {
2150 			error = EAFNOSUPPORT;
2151 			break;
2152 		}
2153 #endif /* INET6 */
2154 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2155 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2156 		    pp->addr.addr.type != PF_ADDR_TABLE) {
2157 			error = EINVAL;
2158 			break;
2159 		}
2160 		pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2161 		if (pa == NULL) {
2162 			error = ENOMEM;
2163 			break;
2164 		}
2165 		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2166 		if (pa->ifname[0]) {
2167 			pa->kif = pfi_attach_rule(pa->ifname);
2168 			if (pa->kif == NULL) {
2169 				pool_put(&pf_pooladdr_pl, pa);
2170 				error = EINVAL;
2171 				break;
2172 			}
2173 		}
2174 		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2175 			pfi_dynaddr_remove(&pa->addr);
2176 			pfi_detach_rule(pa->kif);
2177 			pool_put(&pf_pooladdr_pl, pa);
2178 			error = EINVAL;
2179 			break;
2180 		}
2181 		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2182 		break;
2183 	}
2184 
2185 	case DIOCGETADDRS: {
2186 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2187 
2188 		pp->nr = 0;
2189 		s = splsoftnet();
2190 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2191 		    pp->r_num, 0, 1, 0);
2192 		if (pool == NULL) {
2193 			error = EBUSY;
2194 			splx(s);
2195 			break;
2196 		}
2197 		TAILQ_FOREACH(pa, &pool->list, entries)
2198 			pp->nr++;
2199 		splx(s);
2200 		break;
2201 	}
2202 
2203 	case DIOCGETADDR: {
2204 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
2205 		u_int32_t		 nr = 0;
2206 
2207 		s = splsoftnet();
2208 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2209 		    pp->r_num, 0, 1, 1);
2210 		if (pool == NULL) {
2211 			error = EBUSY;
2212 			splx(s);
2213 			break;
2214 		}
2215 		pa = TAILQ_FIRST(&pool->list);
2216 		while ((pa != NULL) && (nr < pp->nr)) {
2217 			pa = TAILQ_NEXT(pa, entries);
2218 			nr++;
2219 		}
2220 		if (pa == NULL) {
2221 			error = EBUSY;
2222 			splx(s);
2223 			break;
2224 		}
2225 		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2226 		pfi_dynaddr_copyout(&pp->addr.addr);
2227 		pf_tbladdr_copyout(&pp->addr.addr);
2228 		splx(s);
2229 		break;
2230 	}
2231 
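	/*
	 * DIOCCHANGEADDR edits the address pool of an existing rule in
	 * place: validate the action and address type, look up the ruleset
	 * and pool, build the new entry (for add actions), then locate the
	 * reference entry by position and remove or insert at splsoftnet.
	 * Finally the pool's current-address state (cur/counter) is reset.
	 */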
2232 	case DIOCCHANGEADDR: {
2233 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
2234 		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
2235 		struct pf_ruleset	*ruleset;
2236 
2237 		if (pca->action < PF_CHANGE_ADD_HEAD ||
2238 		    pca->action > PF_CHANGE_REMOVE) {
2239 			error = EINVAL;
2240 			break;
2241 		}
2242 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2243 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2244 		    pca->addr.addr.type != PF_ADDR_TABLE) {
2245 			error = EINVAL;
2246 			break;
2247 		}
2248 
2249 		ruleset = pf_find_ruleset(pca->anchor);
2250 		if (ruleset == NULL) {
2251 			error = EBUSY;
2252 			break;
2253 		}
2254 		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2255 		    pca->r_num, pca->r_last, 1, 1);
2256 		if (pool == NULL) {
2257 			error = EBUSY;
2258 			break;
2259 		}
2260 		if (pca->action != PF_CHANGE_REMOVE) {
2261 			newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2262 			if (newpa == NULL) {
2263 				error = ENOMEM;
2264 				break;
2265 			}
2266 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2267 #ifndef INET
2268 			if (pca->af == AF_INET) {
2269 				pool_put(&pf_pooladdr_pl, newpa);
2270 				error = EAFNOSUPPORT;
2271 				break;
2272 			}
2273 #endif /* INET */
2274 #ifndef INET6
2275 			if (pca->af == AF_INET6) {
2276 				pool_put(&pf_pooladdr_pl, newpa);
2277 				error = EAFNOSUPPORT;
2278 				break;
2279 			}
2280 #endif /* INET6 */
2281 			if (newpa->ifname[0]) {
2282 				newpa->kif = pfi_attach_rule(newpa->ifname);
2283 				if (newpa->kif == NULL) {
2284 					pool_put(&pf_pooladdr_pl, newpa);
2285 					error = EINVAL;
2286 					break;
2287 				}
2288 			} else
2289 				newpa->kif = NULL;
2290 			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2291 			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
2292 				pfi_dynaddr_remove(&newpa->addr);
2293 				pfi_detach_rule(newpa->kif);
2294 				pool_put(&pf_pooladdr_pl, newpa);
2295 				error = EINVAL;
2296 				break;
2297 			}
2298 		}
2299 
2300 		s = splsoftnet();
2301 
2302 		if (pca->action == PF_CHANGE_ADD_HEAD)
2303 			oldpa = TAILQ_FIRST(&pool->list);
2304 		else if (pca->action == PF_CHANGE_ADD_TAIL)
2305 			oldpa = TAILQ_LAST(&pool->list, pf_palist);
2306 		else {
2307 			int	i = 0;
2308 
2309 			oldpa = TAILQ_FIRST(&pool->list);
2310 			while ((oldpa != NULL) && (i < pca->nr)) {
2311 				oldpa = TAILQ_NEXT(oldpa, entries);
2312 				i++;
2313 			}
2314 			if (oldpa == NULL) {
2315 				error = EINVAL;
2316 				splx(s);
2317 				break;
2318 			}
2319 		}
2320 
2321 		if (pca->action == PF_CHANGE_REMOVE) {
2322 			TAILQ_REMOVE(&pool->list, oldpa, entries);
2323 			pfi_dynaddr_remove(&oldpa->addr);
2324 			pf_tbladdr_remove(&oldpa->addr);
2325 			pfi_detach_rule(oldpa->kif);
2326 			pool_put(&pf_pooladdr_pl, oldpa);
2327 		} else {
2328 			if (oldpa == NULL)
2329 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2330 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
2331 			    pca->action == PF_CHANGE_ADD_BEFORE)
2332 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2333 			else
2334 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
2335 				    newpa, entries);
2336 		}
2337 
2338 		pool->cur = TAILQ_FIRST(&pool->list);
2339 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2340 		    pca->af);
2341 		splx(s);
2342 		break;
2343 	}
2344 
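	/*
	 * DIOCGETRULESETS counts the child anchors of the given path (or the
	 * top-level anchors for the main ruleset); DIOCGETRULESET then
	 * returns the name of the pr->nr'th child so userland can iterate
	 * over them.
	 */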
2345 	case DIOCGETRULESETS: {
2346 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2347 		struct pf_ruleset	*ruleset;
2348 		struct pf_anchor	*anchor;
2349 
2350 		pr->path[sizeof(pr->path) - 1] = 0;
2351 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2352 			error = EINVAL;
2353 			break;
2354 		}
2355 		pr->nr = 0;
2356 		if (ruleset->anchor == NULL) {
2357 			/* XXX kludge for pf_main_ruleset */
2358 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2359 				if (anchor->parent == NULL)
2360 					pr->nr++;
2361 		} else {
2362 			RB_FOREACH(anchor, pf_anchor_node,
2363 			    &ruleset->anchor->children)
2364 				pr->nr++;
2365 		}
2366 		break;
2367 	}
2368 
2369 	case DIOCGETRULESET: {
2370 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
2371 		struct pf_ruleset	*ruleset;
2372 		struct pf_anchor	*anchor;
2373 		u_int32_t		 nr = 0;
2374 
2375 		pr->path[sizeof(pr->path) - 1] = 0;
2376 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2377 			error = EINVAL;
2378 			break;
2379 		}
2380 		pr->name[0] = 0;
2381 		if (ruleset->anchor == NULL) {
2382 			/* XXX kludge for pf_main_ruleset */
2383 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2384 				if (anchor->parent == NULL && nr++ == pr->nr) {
2385 					strlcpy(pr->name, anchor->name,
2386 					    sizeof(pr->name));
2387 					break;
2388 				}
2389 		} else {
2390 			RB_FOREACH(anchor, pf_anchor_node,
2391 			    &ruleset->anchor->children)
2392 				if (nr++ == pr->nr) {
2393 					strlcpy(pr->name, anchor->name,
2394 					    sizeof(pr->name));
2395 					break;
2396 				}
2397 		}
2398 		if (!pr->name[0])
2399 			error = EBUSY;
2400 		break;
2401 	}
2402 
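	/*
	 * The DIOCR* ioctls below are thin wrappers around the pfr_* table
	 * routines.  Each first checks pfrio_esize against the element size
	 * it expects and passes PFR_FLAG_USERIOCTL to mark a userland
	 * request.
	 */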
2403 	case DIOCRCLRTABLES: {
2404 		struct pfioc_table *io = (struct pfioc_table *)addr;
2405 
2406 		if (io->pfrio_esize != 0) {
2407 			error = ENODEV;
2408 			break;
2409 		}
2410 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2411 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2412 		break;
2413 	}
2414 
2415 	case DIOCRADDTABLES: {
2416 		struct pfioc_table *io = (struct pfioc_table *)addr;
2417 
2418 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2419 			error = ENODEV;
2420 			break;
2421 		}
2422 		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2423 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2424 		break;
2425 	}
2426 
2427 	case DIOCRDELTABLES: {
2428 		struct pfioc_table *io = (struct pfioc_table *)addr;
2429 
2430 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2431 			error = ENODEV;
2432 			break;
2433 		}
2434 		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2435 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2436 		break;
2437 	}
2438 
2439 	case DIOCRGETTABLES: {
2440 		struct pfioc_table *io = (struct pfioc_table *)addr;
2441 
2442 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2443 			error = ENODEV;
2444 			break;
2445 		}
2446 		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2447 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2448 		break;
2449 	}
2450 
2451 	case DIOCRGETTSTATS: {
2452 		struct pfioc_table *io = (struct pfioc_table *)addr;
2453 
2454 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2455 			error = ENODEV;
2456 			break;
2457 		}
2458 		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2459 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2460 		break;
2461 	}
2462 
2463 	case DIOCRCLRTSTATS: {
2464 		struct pfioc_table *io = (struct pfioc_table *)addr;
2465 
2466 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2467 			error = ENODEV;
2468 			break;
2469 		}
2470 		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2471 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2472 		break;
2473 	}
2474 
2475 	case DIOCRSETTFLAGS: {
2476 		struct pfioc_table *io = (struct pfioc_table *)addr;
2477 
2478 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2479 			error = ENODEV;
2480 			break;
2481 		}
2482 		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2483 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2484 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2485 		break;
2486 	}
2487 
2488 	case DIOCRCLRADDRS: {
2489 		struct pfioc_table *io = (struct pfioc_table *)addr;
2490 
2491 		if (io->pfrio_esize != 0) {
2492 			error = ENODEV;
2493 			break;
2494 		}
2495 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2496 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2497 		break;
2498 	}
2499 
2500 	case DIOCRADDADDRS: {
2501 		struct pfioc_table *io = (struct pfioc_table *)addr;
2502 
2503 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2504 			error = ENODEV;
2505 			break;
2506 		}
2507 		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2508 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2509 		    PFR_FLAG_USERIOCTL);
2510 		break;
2511 	}
2512 
2513 	case DIOCRDELADDRS: {
2514 		struct pfioc_table *io = (struct pfioc_table *)addr;
2515 
2516 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2517 			error = ENODEV;
2518 			break;
2519 		}
2520 		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2521 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2522 		    PFR_FLAG_USERIOCTL);
2523 		break;
2524 	}
2525 
2526 	case DIOCRSETADDRS: {
2527 		struct pfioc_table *io = (struct pfioc_table *)addr;
2528 
2529 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2530 			error = ENODEV;
2531 			break;
2532 		}
2533 		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2534 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2535 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2536 		    PFR_FLAG_USERIOCTL);
2537 		break;
2538 	}
2539 
2540 	case DIOCRGETADDRS: {
2541 		struct pfioc_table *io = (struct pfioc_table *)addr;
2542 
2543 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2544 			error = ENODEV;
2545 			break;
2546 		}
2547 		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2548 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2549 		break;
2550 	}
2551 
2552 	case DIOCRGETASTATS: {
2553 		struct pfioc_table *io = (struct pfioc_table *)addr;
2554 
2555 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2556 			error = ENODEV;
2557 			break;
2558 		}
2559 		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2560 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2561 		break;
2562 	}
2563 
2564 	case DIOCRCLRASTATS: {
2565 		struct pfioc_table *io = (struct pfioc_table *)addr;
2566 
2567 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2568 			error = ENODEV;
2569 			break;
2570 		}
2571 		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2572 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2573 		    PFR_FLAG_USERIOCTL);
2574 		break;
2575 	}
2576 
2577 	case DIOCRTSTADDRS: {
2578 		struct pfioc_table *io = (struct pfioc_table *)addr;
2579 
2580 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2581 			error = ENODEV;
2582 			break;
2583 		}
2584 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2585 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2586 		    PFR_FLAG_USERIOCTL);
2587 		break;
2588 	}
2589 
2590 	case DIOCRINADEFINE: {
2591 		struct pfioc_table *io = (struct pfioc_table *)addr;
2592 
2593 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2594 			error = ENODEV;
2595 			break;
2596 		}
2597 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2598 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2599 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2600 		break;
2601 	}
2602 
2603 	case DIOCOSFPADD: {
2604 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2605 		s = splsoftnet();
2606 		error = pf_osfp_add(io);
2607 		splx(s);
2608 		break;
2609 	}
2610 
2611 	case DIOCOSFPGET: {
2612 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2613 		s = splsoftnet();
2614 		error = pf_osfp_get(io);
2615 		splx(s);
2616 		break;
2617 	}
2618 
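	/*
	 * Transaction ioctls: DIOCXBEGIN opens an inactive copy of each
	 * listed ruleset (ALTQ, tables or rules) and returns a ticket per
	 * element, DIOCXROLLBACK throws those copies away, and DIOCXCOMMIT
	 * first checks every ticket and only then swaps all of them in, so
	 * the change is applied atomically from userland's point of view.
	 *
	 * Illustrative userland flow (not from this file):
	 *	DIOCXBEGIN -> load rules/addresses against the tickets ->
	 *	DIOCXCOMMIT, or DIOCXROLLBACK on error.
	 */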
2619 	case DIOCXBEGIN: {
2620 		struct pfioc_trans		*io = (struct pfioc_trans *)
2621 						    addr;
2622 		static struct pfioc_trans_e	 ioe;
2623 		static struct pfr_table		 table;
2624 		int				 i;
2625 
2626 		if (io->esize != sizeof(ioe)) {
2627 			error = ENODEV;
2628 			goto fail;
2629 		}
2630 		for (i = 0; i < io->size; i++) {
2631 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2632 				error = EFAULT;
2633 				goto fail;
2634 			}
2635 			switch (ioe.rs_num) {
2636 #ifdef ALTQ
2637 			case PF_RULESET_ALTQ:
2638 				if (ioe.anchor[0]) {
2639 					error = EINVAL;
2640 					goto fail;
2641 				}
2642 				if ((error = pf_begin_altq(&ioe.ticket)))
2643 					goto fail;
2644 				break;
2645 #endif /* ALTQ */
2646 			case PF_RULESET_TABLE:
2647 				bzero(&table, sizeof(table));
2648 				strlcpy(table.pfrt_anchor, ioe.anchor,
2649 				    sizeof(table.pfrt_anchor));
2650 				if ((error = pfr_ina_begin(&table,
2651 				    &ioe.ticket, NULL, 0)))
2652 					goto fail;
2653 				break;
2654 			default:
2655 				if ((error = pf_begin_rules(&ioe.ticket,
2656 				    ioe.rs_num, ioe.anchor)))
2657 					goto fail;
2658 				break;
2659 			}
2660 			if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2661 				error = EFAULT;
2662 				goto fail;
2663 			}
2664 		}
2665 		break;
2666 	}
2667 
2668 	case DIOCXROLLBACK: {
2669 		struct pfioc_trans		*io = (struct pfioc_trans *)
2670 						    addr;
2671 		static struct pfioc_trans_e	 ioe;
2672 		static struct pfr_table		 table;
2673 		int				 i;
2674 
2675 		if (io->esize != sizeof(ioe)) {
2676 			error = ENODEV;
2677 			goto fail;
2678 		}
2679 		for (i = 0; i < io->size; i++) {
2680 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2681 				error = EFAULT;
2682 				goto fail;
2683 			}
2684 			switch (ioe.rs_num) {
2685 #ifdef ALTQ
2686 			case PF_RULESET_ALTQ:
2687 				if (ioe.anchor[0]) {
2688 					error = EINVAL;
2689 					goto fail;
2690 				}
2691 				if ((error = pf_rollback_altq(ioe.ticket)))
2692 					goto fail; /* really bad */
2693 				break;
2694 #endif /* ALTQ */
2695 			case PF_RULESET_TABLE:
2696 				bzero(&table, sizeof(table));
2697 				strlcpy(table.pfrt_anchor, ioe.anchor,
2698 				    sizeof(table.pfrt_anchor));
2699 				if ((error = pfr_ina_rollback(&table,
2700 				    ioe.ticket, NULL, 0)))
2701 					goto fail; /* really bad */
2702 				break;
2703 			default:
2704 				if ((error = pf_rollback_rules(ioe.ticket,
2705 				    ioe.rs_num, ioe.anchor)))
2706 					goto fail; /* really bad */
2707 				break;
2708 			}
2709 		}
2710 		break;
2711 	}
2712 
2713 	case DIOCXCOMMIT: {
2714 		struct pfioc_trans		*io = (struct pfioc_trans *)
2715 						    addr;
2716 		static struct pfioc_trans_e	 ioe;
2717 		static struct pfr_table		 table;
2718 		struct pf_ruleset		*rs;
2719 		int				 i;
2720 
2721 		if (io->esize != sizeof(ioe)) {
2722 			error = ENODEV;
2723 			goto fail;
2724 		}
2725 		/* first, make sure everything will succeed */
2726 		for (i = 0; i < io->size; i++) {
2727 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2728 				error = EFAULT;
2729 				goto fail;
2730 			}
2731 			switch (ioe.rs_num) {
2732 #ifdef ALTQ
2733 			case PF_RULESET_ALTQ:
2734 				if (ioe.anchor[0]) {
2735 					error = EINVAL;
2736 					goto fail;
2737 				}
2738 				if (!altqs_inactive_open || ioe.ticket !=
2739 				    ticket_altqs_inactive) {
2740 					error = EBUSY;
2741 					goto fail;
2742 				}
2743 				break;
2744 #endif /* ALTQ */
2745 			case PF_RULESET_TABLE:
2746 				rs = pf_find_ruleset(ioe.anchor);
2747 				if (rs == NULL || !rs->topen || ioe.ticket !=
2748 				     rs->tticket) {
2749 					error = EBUSY;
2750 					goto fail;
2751 				}
2752 				break;
2753 			default:
2754 				if (ioe.rs_num < 0 || ioe.rs_num >=
2755 				    PF_RULESET_MAX) {
2756 					error = EINVAL;
2757 					goto fail;
2758 				}
2759 				rs = pf_find_ruleset(ioe.anchor);
2760 				if (rs == NULL ||
2761 				    !rs->rules[ioe.rs_num].inactive.open ||
2762 				    rs->rules[ioe.rs_num].inactive.ticket !=
2763 				    ioe.ticket) {
2764 					error = EBUSY;
2765 					goto fail;
2766 				}
2767 				break;
2768 			}
2769 		}
2770 		/* now do the commit - no errors should happen here */
2771 		for (i = 0; i < io->size; i++) {
2772 			if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2773 				error = EFAULT;
2774 				goto fail;
2775 			}
2776 			switch (ioe.rs_num) {
2777 #ifdef ALTQ
2778 			case PF_RULESET_ALTQ:
2779 				if ((error = pf_commit_altq(ioe.ticket)))
2780 					goto fail; /* really bad */
2781 				break;
2782 #endif /* ALTQ */
2783 			case PF_RULESET_TABLE:
2784 				bzero(&table, sizeof(table));
2785 				strlcpy(table.pfrt_anchor, ioe.anchor,
2786 				    sizeof(table.pfrt_anchor));
2787 				if ((error = pfr_ina_commit(&table, ioe.ticket,
2788 				    NULL, NULL, 0)))
2789 					goto fail; /* really bad */
2790 				break;
2791 			default:
2792 				if ((error = pf_commit_rules(ioe.ticket,
2793 				    ioe.rs_num, ioe.anchor)))
2794 					goto fail; /* really bad */
2795 				break;
2796 			}
2797 		}
2798 		break;
2799 	}
2800 
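	/*
	 * DIOCGETSRCNODES: with psn_len == 0 only the required buffer size
	 * is reported; otherwise the source-tracking nodes are copied out
	 * with their creation and expire times converted to relative
	 * seconds.
	 */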
2801 	case DIOCGETSRCNODES: {
2802 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2803 		struct pf_src_node	*n;
2804 		struct pf_src_node	*p, pstore;
2805 		u_int32_t		 nr = 0;
2806 		int			 space = psn->psn_len;
2807 
2808 		if (space == 0) {
2809 			s = splsoftnet();
2810 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2811 				nr++;
2812 			splx(s);
2813 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2814 			return (0);
2815 		}
2816 
2817 		s = splsoftnet();
2818 		p = psn->psn_src_nodes;
2819 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2820 			int	secs = time_second;
2821 
2822 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2823 				break;
2824 
2825 			bcopy(n, &pstore, sizeof(pstore));
2826 			if (n->rule.ptr != NULL)
2827 				pstore.rule.nr = n->rule.ptr->nr;
2828 			pstore.creation = secs - pstore.creation;
2829 			if (pstore.expire > secs)
2830 				pstore.expire -= secs;
2831 			else
2832 				pstore.expire = 0;
2833 			error = copyout(&pstore, p, sizeof(*p));
2834 			if (error) {
2835 				splx(s);
2836 				goto fail;
2837 			}
2838 			p++;
2839 			nr++;
2840 		}
2841 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2842 		splx(s);
2843 		break;
2844 	}
2845 
2846 	case DIOCCLRSRCNODES: {
2847 		struct pf_src_node	*n;
2848 		struct pf_state		*state;
2849 
2850 		s = splsoftnet();
2851 		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2852 			state->src_node = NULL;
2853 			state->nat_src_node = NULL;
2854 		}
2855 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2856 			n->expire = 1;
2857 			n->states = 0;
2858 		}
2859 		pf_purge_expired_src_nodes();
2860 		pf_status.src_nodes = 0;
2861 		splx(s);
2862 		break;
2863 	}
2864 
2865 	case DIOCSETHOSTID: {
2866 		u_int32_t	*hostid = (u_int32_t *)addr;
2867 
2868 		if (*hostid == 0) {
2869 			error = EINVAL;
2870 			goto fail;
2871 		}
2872 		pf_status.hostid = *hostid;
2873 		break;
2874 	}
2875 
2876 	case DIOCOSFPFLUSH:
2877 		s = splsoftnet();
2878 		pf_osfp_flush();
2879 		splx(s);
2880 		break;
2881 
2882 	case DIOCIGETIFACES: {
2883 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2884 
2885 		if (io->pfiio_esize != sizeof(struct pfi_if)) {
2886 			error = ENODEV;
2887 			break;
2888 		}
2889 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2890 		    &io->pfiio_size, io->pfiio_flags);
2891 		break;
2892 	}
2893 
2894 	case DIOCICLRISTATS: {
2895 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2896 
2897 		error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
2898 		    io->pfiio_flags);
2899 		break;
2900 	}
2901 
2902 	default:
2903 		error = ENODEV;
2904 		break;
2905 	}
2906 fail:
2907 
2908 	return (error);
2909 }
2910 
2911 #ifdef __NetBSD__
2912 int
2913 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2914 {
2915 	int error;
2916 
2917 	/*
2918 	 * Ensure that the mbufs are writable up front, as the pf code
2919 	 * assumes.  An IP header (up to 60 bytes) plus a TCP header
2920 	 * (up to 60 bytes) should be enough.
2921 	 * XXX inefficient
2922 	 */
2923 	error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
2924 	if (error) {
2925 		m_freem(*mp);
2926 		*mp = NULL;
2927 		return error;
2928 	}
2929 
2930 	/*
2931 	 * If the packet is out-bound, we can't delay checksums
2932 	 * here.  For in-bound, the checksum has already been
2933 	 * validated.
2934 	 */
2935 	if (dir == PFIL_OUT) {
2936 		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2937 			in_delayed_cksum(*mp);
2938 			(*mp)->m_pkthdr.csum_flags &=
2939 			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2940 		}
2941 	}
2942 
2943 	if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2944 	    != PF_PASS) {
2945 		m_freem(*mp);
2946 		*mp = NULL;
2947 		return EHOSTUNREACH;
2948 	}
2949 
2950 	/*
2951 	 * pf is not compatible with the IP fast-forward path, so keep
2952 	 * inbound packets off it.
2953 	 */
2953 
2954 	if (dir == PFIL_IN) {
2955 		(*mp)->m_flags &= ~M_CANFASTFWD;
2956 	}
2957 
2958 	return (0);
2959 }
2960 
2961 #ifdef INET6
2962 int
2963 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2964 {
2965 	int error;
2966 
2967 	/*
2968 	 * Ensure that the mbufs are writable up front, as the pf code
2969 	 * assumes.
2970 	 * XXX inefficient
2971 	 */
2972 	error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
2973 	if (error) {
2974 		m_freem(*mp);
2975 		*mp = NULL;
2976 		return error;
2977 	}
2978 
2979 	if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
2980 	    != PF_PASS) {
2981 		m_freem(*mp);
2982 		*mp = NULL;
2983 		return EHOSTUNREACH;
2984 	} else
2985 		return (0);
2986 }
2987 #endif
2988 
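/*
 * For the PFIL_IFNET and PFIL_IFADDR hooks the pfil(9) framework passes the
 * command value in place of the mbuf pointer, hence the (u_long)mp casts in
 * the two wrappers below.
 */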
2989 int
2990 pfil_ifnet_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2991 {
2992 	u_long cmd = (u_long)mp;
2993 
2994 	switch (cmd) {
2995 	case PFIL_IFNET_ATTACH:
2996 		pfi_attach_ifnet(ifp);
2997 		break;
2998 	case PFIL_IFNET_DETACH:
2999 		pfi_detach_ifnet(ifp);
3000 		break;
3001 	}
3002 
3003 	return (0);
3004 }
3005 
3006 int
3007 pfil_ifaddr_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3008 {
3009 	extern void pfi_kifaddr_update_if(struct ifnet *);
3010 
3011 	u_long cmd = (u_long)mp;
3012 
3013 	switch (cmd) {
3014 	case SIOCSIFADDR:
3015 	case SIOCAIFADDR:
3016 	case SIOCDIFADDR:
3017 #ifdef INET6
3018 	case SIOCAIFADDR_IN6:
3019 	case SIOCDIFADDR_IN6:
3020 #endif
3021 		pfi_kifaddr_update_if(ifp);
3022 		break;
3023 	default:
3024 		panic("unexpected ioctl");
3025 	}
3026 
3027 	return (0);
3028 }
3029 
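/*
 * pf_pfil_attach() registers pf with the pfil(9) framework: first the ifnet
 * and ifaddr hooks, then the AF_INET (and, with INET6, AF_INET6) packet
 * hooks, and finally it attaches all currently existing interfaces.  On any
 * failure the previously registered hooks are unwound via the bad* labels.
 */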
3030 static int
3031 pf_pfil_attach(void)
3032 {
3033 	struct pfil_head *ph_inet;
3034 #ifdef INET6
3035 	struct pfil_head *ph_inet6;
3036 #endif
3037 	int error;
3038 	int i;
3039 
3040 	if (pf_pfil_attached)
3041 		return (0);
3042 
3043 	error = pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3044 	if (error)
3045 		goto bad1;
3046 	error = pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3047 	if (error)
3048 		goto bad2;
3049 
3050 	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3051 	if (ph_inet)
3052 		error = pfil_add_hook((void *)pfil4_wrapper, NULL,
3053 		    PFIL_IN|PFIL_OUT, ph_inet);
3054 	else
3055 		error = ENOENT;
3056 	if (error)
3057 		goto bad3;
3058 
3059 #ifdef INET6
3060 	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3061 	if (ph_inet6)
3062 		error = pfil_add_hook((void *)pfil6_wrapper, NULL,
3063 		    PFIL_IN|PFIL_OUT, ph_inet6);
3064 	else
3065 		error = ENOENT;
3066 	if (error)
3067 		goto bad4;
3068 #endif
3069 
3070 	for (i = 0; i < if_indexlim; i++)
3071 		if (ifindex2ifnet[i])
3072 			pfi_attach_ifnet(ifindex2ifnet[i]);
3073 	pf_pfil_attached = 1;
3074 
3075 	return (0);
3076 
3077 #ifdef INET6
3078 bad4:
3079 	pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
3080 #endif
3081 bad3:
3082 	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3083 bad2:
3084 	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3085 bad1:
3086 	return (error);
3087 }
3088 
3089 static int
3090 pf_pfil_detach(void)
3091 {
3092 	struct pfil_head *ph_inet;
3093 #ifdef INET6
3094 	struct pfil_head *ph_inet6;
3095 #endif
3096 	int i;
3097 
3098 	if (pf_pfil_attached == 0)
3099 		return (0);
3100 
3101 	for (i = 0; i < if_indexlim; i++)
3102 		if (pfi_index2kif[i])
3103 			pfi_detach_ifnet(ifindex2ifnet[i]);
3104 
3105 	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, &if_pfil);
3106 	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, &if_pfil);
3107 
3108 	ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3109 	if (ph_inet)
3110 		pfil_remove_hook((void *)pfil4_wrapper, NULL,
3111 		    PFIL_IN|PFIL_OUT, ph_inet);
3112 #ifdef INET6
3113 	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3114 	if (ph_inet6)
3115 		pfil_remove_hook((void *)pfil6_wrapper, NULL,
3116 		    PFIL_IN|PFIL_OUT, ph_inet6);
3117 #endif
3118 	pf_pfil_attached = 0;
3119 
3120 	return (0);
3121 }
3122 #endif
3123