xref: /openbsd-src/sys/net/pf_ioctl.c (revision 2b0358df1d88d06ef4139321dd05bd5e05d91eaf)
1 /*	$OpenBSD: pf_ioctl.c,v 1.217 2009/04/07 12:48:40 henning Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/kernel.h>
49 #include <sys/time.h>
50 #include <sys/timeout.h>
51 #include <sys/pool.h>
52 #include <sys/proc.h>
53 #include <sys/malloc.h>
54 #include <sys/kthread.h>
55 #include <sys/rwlock.h>
56 #include <uvm/uvm_extern.h>
57 
58 #include <net/if.h>
59 #include <net/if_types.h>
60 #include <net/route.h>
61 
62 #include <netinet/in.h>
63 #include <netinet/in_var.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_var.h>
67 #include <netinet/ip_icmp.h>
68 
69 #include <dev/rndvar.h>
70 #include <crypto/md5.h>
71 #include <net/pfvar.h>
72 
73 #if NPFSYNC > 0
74 #include <net/if_pfsync.h>
75 #endif /* NPFSYNC > 0 */
76 
77 #if NPFLOG > 0
78 #include <net/if_pflog.h>
79 #endif /* NPFLOG > 0 */
80 
81 #ifdef INET6
82 #include <netinet/ip6.h>
83 #include <netinet/in_pcb.h>
84 #endif /* INET6 */
85 
86 #ifdef ALTQ
87 #include <altq/altq.h>
88 #endif
89 
90 void			 pfattach(int);
91 void			 pf_thread_create(void *);
92 int			 pfopen(dev_t, int, int, struct proc *);
93 int			 pfclose(dev_t, int, int, struct proc *);
94 struct pf_pool		*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
95 			    u_int8_t, u_int8_t, u_int8_t);
96 
97 void			 pf_mv_pool(struct pf_palist *, struct pf_palist *);
98 void			 pf_empty_pool(struct pf_palist *);
99 int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
100 #ifdef ALTQ
101 int			 pf_begin_altq(u_int32_t *);
102 int			 pf_rollback_altq(u_int32_t);
103 int			 pf_commit_altq(u_int32_t);
104 int			 pf_enable_altq(struct pf_altq *);
105 int			 pf_disable_altq(struct pf_altq *);
106 #endif /* ALTQ */
107 int			 pf_begin_rules(u_int32_t *, int, const char *);
108 int			 pf_rollback_rules(u_int32_t, int, char *);
109 int			 pf_setup_pfsync_matching(struct pf_ruleset *);
110 void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
111 void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
112 int			 pf_commit_rules(u_int32_t, int, char *);
113 int			 pf_addr_setup(struct pf_ruleset *,
114 			    struct pf_addr_wrap *, sa_family_t);
115 void			 pf_addr_copyout(struct pf_addr_wrap *);
116 
117 struct pf_rule		 pf_default_rule, pf_default_rule_new;
118 struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
119 #ifdef ALTQ
120 static int		 pf_altq_running;
121 #endif
122 
123 #define	TAGID_MAX	 50000
124 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
125 				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
126 
127 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
128 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
129 #endif
130 u_int16_t		 tagname2tag(struct pf_tags *, char *);
131 void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
132 void			 tag_unref(struct pf_tags *, u_int16_t);
133 int			 pf_rtlabel_add(struct pf_addr_wrap *);
134 void			 pf_rtlabel_remove(struct pf_addr_wrap *);
135 void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
136 
137 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
138 
/*
 * Attach the pf pseudo-device: create the backing memory pools,
 * initialize the global trees, rulesets and ALTQ lists, install the
 * default ("pass all") rule with its timeout table, and schedule
 * creation of the state-purge thread.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* Dedicated pools for every pf object type. */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
	    "pfstateitempl", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 0, 0,
	    "pfruleitempl", NULL);
	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
	    &pool_allocator_nointr);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", &pool_allocator_nointr);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* Use a smaller table-entry ceiling on machines with <= 100MB RAM. */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* Two ALTQ lists: one active, one staging area for transactions. */
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
	kthread_create_deferred(pf_thread_create, NULL);
}
220 
/*
 * Deferred-kthread callback: create the state purge thread once the
 * scheduler is running.  pf cannot operate without it, so failure
 * is fatal.
 */
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
227 
228 int
229 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
230 {
231 	if (minor(dev) >= 1)
232 		return (ENXIO);
233 	return (0);
234 }
235 
236 int
237 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
238 {
239 	if (minor(dev) >= 1)
240 		return (ENXIO);
241 	return (0);
242 }
243 
/*
 * Look up the address pool attached to a single rule.
 *
 * anchor/rule_action select the ruleset and rule queue; "active"
 * chooses between the active queue and the inactive (transaction)
 * queue.  When check_ticket is set, the caller's ticket must match
 * the queue's current ticket, which guards against racing a
 * concurrent commit.  With r_last the last rule on the queue is
 * used; otherwise the queue is scanned for the rule whose ->nr
 * equals rule_number.  Returns NULL when any step fails.
 */
struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	/* Linear search by rule number, unless the last rule was wanted. */
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
287 
288 void
289 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
290 {
291 	struct pf_pooladdr	*mv_pool_pa;
292 
293 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
294 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
295 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
296 	}
297 }
298 
299 void
300 pf_empty_pool(struct pf_palist *poola)
301 {
302 	struct pf_pooladdr	*empty_pool_pa;
303 
304 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
305 		pfi_dynaddr_remove(&empty_pool_pa->addr);
306 		pf_tbladdr_remove(&empty_pool_pa->addr);
307 		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
308 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
309 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
310 	}
311 }
312 
/*
 * Unlink "rule" from "rulequeue" (when given) and free it once it is
 * no longer referenced.  A rule still referenced by states or source
 * nodes is only detached (tqe_prev set to NULL marks it as off-queue)
 * and freed later when the last reference drops; callers then invoke
 * this again with rulequeue == NULL.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Still referenced, or still queued elsewhere: defer the free. */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	/* Tables not yet detached above (deferred-free path). */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
358 
/*
 * Return the tag id for "tagname", bumping its reference count, or
 * allocate a new entry when the name is unknown.  Ids start at 1 and
 * the list is kept sorted by id, so the first gap is reused.
 * Returns 0 on failure (id space exhausted or allocation failure).
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_TEMP, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
401 
402 void
403 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
404 {
405 	struct pf_tagname	*tag;
406 
407 	TAILQ_FOREACH(tag, head, entries)
408 		if (tag->tag == tagid) {
409 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
410 			return;
411 		}
412 }
413 
414 void
415 tag_unref(struct pf_tags *head, u_int16_t tag)
416 {
417 	struct pf_tagname	*p, *next;
418 
419 	if (tag == 0)
420 		return;
421 
422 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
423 		next = TAILQ_NEXT(p, entries);
424 		if (tag == p->tag) {
425 			if (--p->ref == 0) {
426 				TAILQ_REMOVE(head, p, entries);
427 				free(p, M_TEMP);
428 			}
429 			break;
430 		}
431 	}
432 }
433 
/* Map a packet-tag name to an id in the global pf_tags registry. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}
439 
/* Reverse lookup: copy the name of packet tag "tagid" into p. */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
445 
446 void
447 pf_tag_ref(u_int16_t tag)
448 {
449 	struct pf_tagname *t;
450 
451 	TAILQ_FOREACH(t, &pf_tags, entries)
452 		if (t->tag == tag)
453 			break;
454 	if (t != NULL)
455 		t->ref++;
456 }
457 
/* Release one reference on packet tag "tag" (see tag_unref()). */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
463 
464 int
465 pf_rtlabel_add(struct pf_addr_wrap *a)
466 {
467 	if (a->type == PF_ADDR_RTLABEL &&
468 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
469 		return (-1);
470 	return (0);
471 }
472 
473 void
474 pf_rtlabel_remove(struct pf_addr_wrap *a)
475 {
476 	if (a->type == PF_ADDR_RTLABEL)
477 		rtlabel_unref(a->v.rtlabel);
478 }
479 
480 void
481 pf_rtlabel_copyout(struct pf_addr_wrap *a)
482 {
483 	const char	*name;
484 
485 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
486 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
487 			strlcpy(a->v.rtlabelname, "?",
488 			    sizeof(a->v.rtlabelname));
489 		else
490 			strlcpy(a->v.rtlabelname, name,
491 			    sizeof(a->v.rtlabelname));
492 	}
493 }
494 
495 #ifdef ALTQ
/* ALTQ queue names share the tag allocator: a qid is a tag id. */
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}
501 
/* Reverse lookup: copy the queue name registered for "qid" into p. */
void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}
507 
/* Release one reference on queue id "qid" (see tag_unref()). */
void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}
513 
/*
 * Start an ALTQ transaction: discard anything left on the inactive
 * queue list from an aborted transaction and hand the caller a fresh
 * ticket.  Returns the error from altq_remove(), if any.
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
536 
/*
 * Abort an ALTQ transaction: purge the inactive queue list and close
 * the transaction.  A stale or unknown ticket is silently ignored
 * (returns 0).
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
558 
/*
 * Commit an ALTQ transaction: swap the active and inactive queue
 * lists, attach and (if ALTQ is running) enable the new disciplines,
 * then tear down the old ones.  Runs at splsoftnet() so packet
 * processing never observes a half-swapped configuration.  Returns
 * EBUSY on ticket mismatch, otherwise the first attach/detach error.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}
612 
613 int
614 pf_enable_altq(struct pf_altq *altq)
615 {
616 	struct ifnet		*ifp;
617 	struct tb_profile	 tb;
618 	int			 s, error = 0;
619 
620 	if ((ifp = ifunit(altq->ifname)) == NULL)
621 		return (EINVAL);
622 
623 	if (ifp->if_snd.altq_type != ALTQT_NONE)
624 		error = altq_enable(&ifp->if_snd);
625 
626 	/* set tokenbucket regulator */
627 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
628 		tb.rate = altq->ifbandwidth;
629 		tb.depth = altq->tbrsize;
630 		s = splnet();
631 		error = tbr_set(&ifp->if_snd, &tb);
632 		splx(s);
633 	}
634 
635 	return (error);
636 }
637 
/*
 * Disable the queueing discipline "altq" refers to and clear its
 * token-bucket regulator.  If the interface's discipline has already
 * been replaced by a newer one, there is nothing to do.
 */
int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		s = splnet();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}

	return (error);
}
667 #endif /* ALTQ */
668 
/*
 * Start a ruleset transaction: flush the inactive rule queue for
 * rs_num in "anchor" (creating the ruleset if needed) and return a
 * new ticket through *ticket.  Returns EINVAL on a bad ruleset
 * number or anchor path.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
688 
/*
 * Abort a ruleset transaction: drop every rule staged on the
 * inactive queue and close it.  A mismatched ticket or unknown
 * anchor is silently ignored (returns 0).
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
708 
709 #define PF_MD5_UPD(st, elm)						\
710 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
711 
712 #define PF_MD5_UPD_STR(st, elm)						\
713 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
714 
715 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
716 		(stor) = htonl((st)->elm);				\
717 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
718 } while (0)
719 
720 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
721 		(stor) = htons((st)->elm);				\
722 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
723 } while (0)
724 
725 void
726 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
727 {
728 	PF_MD5_UPD(pfr, addr.type);
729 	switch (pfr->addr.type) {
730 		case PF_ADDR_DYNIFTL:
731 			PF_MD5_UPD(pfr, addr.v.ifname);
732 			PF_MD5_UPD(pfr, addr.iflags);
733 			break;
734 		case PF_ADDR_TABLE:
735 			PF_MD5_UPD(pfr, addr.v.tblname);
736 			break;
737 		case PF_ADDR_ADDRMASK:
738 			/* XXX ignore af? */
739 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
740 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
741 			break;
742 		case PF_ADDR_RTLABEL:
743 			PF_MD5_UPD(pfr, addr.v.rtlabelname);
744 			break;
745 	}
746 
747 	PF_MD5_UPD(pfr, port[0]);
748 	PF_MD5_UPD(pfr, port[1]);
749 	PF_MD5_UPD(pfr, neg);
750 	PF_MD5_UPD(pfr, port_op);
751 }
752 
/*
 * Fold the match-relevant fields of "rule" into the MD5 context.
 * The digest over the whole main ruleset is the checksum pfsync
 * peers compare to detect diverging rulesets.  Multi-byte fields go
 * through htonl/htons first so the checksum is byte-order stable.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
791 
/*
 * Commit a ruleset transaction: atomically (at splsoftnet) swap the
 * inactive rule queue into place, recompute skip steps, and destroy
 * the previously active rules.  For the main ruleset the pfsync
 * checksum is recomputed first.  Returns EBUSY on ticket mismatch.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);
	return (0);
}
848 
/*
 * Compute the pfsync ruleset checksum over all inactive rule queues
 * of "rs" and (re)build each queue's ptr_array, which maps rule
 * numbers to rule pointers.  The digest is stored in
 * pf_status.pf_chksum.  Returns ENOMEM if an array allocation fails.
 *
 * NOTE(review): the assignment indexes ptr_array by rule->nr while
 * the array is sized by rcount — this assumes nr values are dense in
 * [0, rcount); verify against how rules are numbered at insertion.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* Throw away any stale array before rebuilding. */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}
884 
885 int
886 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
887     sa_family_t af)
888 {
889 	if (pfi_dynaddr_setup(addr, af) ||
890 	    pf_tbladdr_setup(ruleset, addr))
891 		return (EINVAL);
892 
893 	return (0);
894 }
895 
/*
 * Fill in the userland-visible parts of an address wrapper (dynamic
 * interface address, table name, route label) before copyout.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
903 
904 int
905 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
906 {
907 	struct pf_pooladdr	*pa = NULL;
908 	struct pf_pool		*pool = NULL;
909 	int			 s;
910 	int			 error = 0;
911 
912 	/* XXX keep in sync with switch() below */
913 	if (securelevel > 1)
914 		switch (cmd) {
915 		case DIOCGETRULES:
916 		case DIOCGETRULE:
917 		case DIOCGETADDRS:
918 		case DIOCGETADDR:
919 		case DIOCGETSTATE:
920 		case DIOCSETSTATUSIF:
921 		case DIOCGETSTATUS:
922 		case DIOCCLRSTATUS:
923 		case DIOCNATLOOK:
924 		case DIOCSETDEBUG:
925 		case DIOCGETSTATES:
926 		case DIOCGETTIMEOUT:
927 		case DIOCCLRRULECTRS:
928 		case DIOCGETLIMIT:
929 		case DIOCGETALTQS:
930 		case DIOCGETALTQ:
931 		case DIOCGETQSTATS:
932 		case DIOCGETRULESETS:
933 		case DIOCGETRULESET:
934 		case DIOCRGETTABLES:
935 		case DIOCRGETTSTATS:
936 		case DIOCRCLRTSTATS:
937 		case DIOCRCLRADDRS:
938 		case DIOCRADDADDRS:
939 		case DIOCRDELADDRS:
940 		case DIOCRSETADDRS:
941 		case DIOCRGETADDRS:
942 		case DIOCRGETASTATS:
943 		case DIOCRCLRASTATS:
944 		case DIOCRTSTADDRS:
945 		case DIOCOSFPGET:
946 		case DIOCGETSRCNODES:
947 		case DIOCCLRSRCNODES:
948 		case DIOCIGETIFACES:
949 		case DIOCSETIFFLAG:
950 		case DIOCCLRIFFLAG:
951 			break;
952 		case DIOCRCLRTABLES:
953 		case DIOCRADDTABLES:
954 		case DIOCRDELTABLES:
955 		case DIOCRSETTFLAGS:
956 			if (((struct pfioc_table *)addr)->pfrio_flags &
957 			    PFR_FLAG_DUMMY)
958 				break; /* dummy operation ok */
959 			return (EPERM);
960 		default:
961 			return (EPERM);
962 		}
963 
964 	if (!(flags & FWRITE))
965 		switch (cmd) {
966 		case DIOCGETRULES:
967 		case DIOCGETADDRS:
968 		case DIOCGETADDR:
969 		case DIOCGETSTATE:
970 		case DIOCGETSTATUS:
971 		case DIOCGETSTATES:
972 		case DIOCGETTIMEOUT:
973 		case DIOCGETLIMIT:
974 		case DIOCGETALTQS:
975 		case DIOCGETALTQ:
976 		case DIOCGETQSTATS:
977 		case DIOCGETRULESETS:
978 		case DIOCGETRULESET:
979 		case DIOCNATLOOK:
980 		case DIOCRGETTABLES:
981 		case DIOCRGETTSTATS:
982 		case DIOCRGETADDRS:
983 		case DIOCRGETASTATS:
984 		case DIOCRTSTADDRS:
985 		case DIOCOSFPGET:
986 		case DIOCGETSRCNODES:
987 		case DIOCIGETIFACES:
988 			break;
989 		case DIOCRCLRTABLES:
990 		case DIOCRADDTABLES:
991 		case DIOCRDELTABLES:
992 		case DIOCRCLRTSTATS:
993 		case DIOCRCLRADDRS:
994 		case DIOCRADDADDRS:
995 		case DIOCRDELADDRS:
996 		case DIOCRSETADDRS:
997 		case DIOCRSETTFLAGS:
998 			if (((struct pfioc_table *)addr)->pfrio_flags &
999 			    PFR_FLAG_DUMMY) {
1000 				flags |= FWRITE; /* need write lock for dummy */
1001 				break; /* dummy operation ok */
1002 			}
1003 			return (EACCES);
1004 		case DIOCGETRULE:
1005 			if (((struct pfioc_rule *)addr)->action ==
1006 			    PF_GET_CLR_CNTR)
1007 				return (EACCES);
1008 			break;
1009 		default:
1010 			return (EACCES);
1011 		}
1012 
1013 	if (flags & FWRITE)
1014 		rw_enter_write(&pf_consistency_lock);
1015 	else
1016 		rw_enter_read(&pf_consistency_lock);
1017 
1018 	s = splsoftnet();
1019 	switch (cmd) {
1020 
1021 	case DIOCSTART:
1022 		if (pf_status.running)
1023 			error = EEXIST;
1024 		else {
1025 			pf_status.running = 1;
1026 			pf_status.since = time_second;
1027 			if (pf_status.stateid == 0) {
1028 				pf_status.stateid = time_second;
1029 				pf_status.stateid = pf_status.stateid << 32;
1030 			}
1031 			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1032 		}
1033 		break;
1034 
1035 	case DIOCSTOP:
1036 		if (!pf_status.running)
1037 			error = ENOENT;
1038 		else {
1039 			pf_status.running = 0;
1040 			pf_status.since = time_second;
1041 			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1042 		}
1043 		break;
1044 
	case DIOCADDRULE: {
		/*
		 * Append a rule to the inactive (staged) ruleset.  The caller
		 * must hold the current ruleset ticket and pool-address
		 * ticket; the rule becomes visible only when the ruleset is
		 * committed.  Validation failures after the pool_get() are
		 * accumulated in `error' and the partially-built rule is torn
		 * down via pf_rm_rule() at the end.
		 */
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		int			 rs_num;

		/* Force NUL termination of the user-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		/* return_icmp packs type in the high byte; bound-check it. */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		/* Both tickets must match the current staging generation. */
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
		/* Record the creating process (real uid and pid). */
		rule->cuid = p->p_cred->p_ruid;
		rule->cpid = p->p_pid;
		/* Kernel-only pointers must not come from userland. */
		rule->anchor = NULL;
		rule->kif = NULL;
		TAILQ_INIT(&rule->rpool.list);
		/* initialize refcounting */
		rule->states_cur = 0;
		rule->src_nodes = 0;
		rule->entries.tqe_prev = NULL;
#ifndef INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		/* Number the new rule after the current last staged rule. */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

		/* Non-default routing tables must already exist. */
		if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
			error = EBUSY;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		/* route-to et al. only make sense with a direction. */
		if (rule->rt && !rule->direction)
			error = EINVAL;
#if NPFLOG > 0
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
#endif
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr))
				error = EINVAL;

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname, 0)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		/* Adopt the staged pool addresses into the rule. */
		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		/*
		 * Translation rules outside an anchor, and rules with a
		 * routing action, require a non-empty address pool.
		 */
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			/* Tear down everything acquired above. */
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}
1192 
	case DIOCGETRULES: {
		/*
		 * Return the number of rules in the active ruleset (pr->nr)
		 * and the ticket needed to iterate them with DIOCGETRULE.
		 */
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		/* Force NUL termination of the user-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		/* Rules are numbered 0..n-1, so count = last nr + 1. */
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}
1219 
	case DIOCGETRULE: {
		/*
		 * Copy out the rule numbered pr->nr from the active ruleset.
		 * The ticket from DIOCGETRULES must still be current;
		 * optionally (PF_GET_CLR_CNTR) clear its counters.
		 */
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		/* Force NUL termination of the user-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		/* Linear scan for the rule carrying number pr->nr. */
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		/* Translate kernel skip-step pointers into rule numbers. */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			rule->states_tot = 0;
		}
		break;
	}
1270 
	case DIOCCHANGERULE: {
		/*
		 * Modify the ACTIVE ruleset in place: add a rule at the
		 * head/tail/before/after a given rule number, or remove one.
		 * PF_CHANGE_GET_TICKET bumps and returns the ticket that
		 * must accompany the subsequent change.  Rule numbers and
		 * skip steps are recomputed afterwards.
		 */
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;
		int			 rs_num;

		/* Only actions that install a new rule need a pool ticket. */
		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			/* icmp type lives in the high byte; bound-check. */
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			/*
			 * Build and validate the replacement rule.  This
			 * mirrors the DIOCADDRULE validation sequence;
			 * errors accumulate and the rule is torn down via
			 * pf_rm_rule() below.
			 */
			newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
			newrule->cuid = p->p_cred->p_ruid;
			newrule->cpid = p->p_pid;
			TAILQ_INIT(&newrule->rpool.list);
			/* initialize refcounting */
			newrule->states_cur = 0;
			newrule->entries.tqe_prev = NULL;
#ifndef INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else
				newrule->kif = NULL;

			/* Non-default routing tables must already exist. */
			if (newrule->rtableid > 0 &&
			    !rtable_exists(newrule->rtableid))
				error = EBUSY;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
#if NPFLOG > 0
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
#endif
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr))
				error = EBUSY;
			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			/* pa is a function-scope pf_pooladdr pointer. */
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
				if (pf_tbladdr_setup(ruleset, &pa->addr))
					error = EINVAL;

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname, 0)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			/*
			 * NOTE(review): unlike DIOCADDRULE, the
			 * rt > PF_FASTROUTE test here is also gated on
			 * !anchor — confirm this asymmetry is intended.
			 */
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		/* Locate the insertion/removal point. */
		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		/* Renumber the whole ruleset after the change. */
		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}
1480 
	case DIOCCLRSTATES: {
		/*
		 * Flush all states, or only those on a named interface.
		 * The successor is fetched before pf_unlink_state() removes
		 * the current node from the tree.
		 */
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC > 0
				/* don't send out individual delete messages */
				SET(s->state_flags, PFSTATE_NOSYNC);
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
#if NPFSYNC > 0
		/* One bulk clear message instead of per-state deletes. */
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}
1505 
1506 	case DIOCKILLSTATES: {
1507 		struct pf_state		*s, *nexts;
1508 		struct pf_state_key	*sk;
1509 		struct pf_addr		*srcaddr, *dstaddr;
1510 		u_int16_t		 srcport, dstport;
1511 		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1512 		u_int			 killed = 0;
1513 
1514 		if (psk->psk_pfcmp.id) {
1515 			if (psk->psk_pfcmp.creatorid == 0)
1516 				psk->psk_pfcmp.creatorid = pf_status.hostid;
1517 			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1518 				pf_unlink_state(s);
1519 				psk->psk_killed = 1;
1520 			}
1521 			break;
1522 		}
1523 
1524 		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
1525 		    s = nexts) {
1526 			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1527 			sk = s->key[PF_SK_WIRE];
1528 
1529 			if (s->direction == PF_OUT) {
1530 				srcaddr = &sk->addr[1];
1531 				dstaddr = &sk->addr[0];
1532 				srcport = sk->port[0];
1533 				dstport = sk->port[0];
1534 			} else {
1535 				srcaddr = &sk->addr[0];
1536 				dstaddr = &sk->addr[1];
1537 				srcport = sk->port[0];
1538 				dstport = sk->port[0];
1539 			}
1540 			if ((!psk->psk_af || sk->af == psk->psk_af)
1541 			    && (!psk->psk_proto || psk->psk_proto ==
1542 			    sk->proto) &&
1543 			    PF_MATCHA(psk->psk_src.neg,
1544 			    &psk->psk_src.addr.v.a.addr,
1545 			    &psk->psk_src.addr.v.a.mask,
1546 			    srcaddr, sk->af) &&
1547 			    PF_MATCHA(psk->psk_dst.neg,
1548 			    &psk->psk_dst.addr.v.a.addr,
1549 			    &psk->psk_dst.addr.v.a.mask,
1550 			    dstaddr, sk->af) &&
1551 			    (psk->psk_src.port_op == 0 ||
1552 			    pf_match_port(psk->psk_src.port_op,
1553 			    psk->psk_src.port[0], psk->psk_src.port[1],
1554 			    srcport)) &&
1555 			    (psk->psk_dst.port_op == 0 ||
1556 			    pf_match_port(psk->psk_dst.port_op,
1557 			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1558 			    dstport)) &&
1559 			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
1560 			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1561 			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1562 			    s->kif->pfik_name))) {
1563 				pf_unlink_state(s);
1564 				killed++;
1565 			}
1566 		}
1567 		psk->psk_killed = killed;
1568 		break;
1569 	}
1570 
	case DIOCADDSTATE: {
		/*
		 * Insert a state supplied by userland, reusing the pfsync
		 * import path.  The timeout must be a valid PFTM_* value
		 * (PFTM_UNTIL_PACKET is explicitly allowed).
		 */
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		if (sp->timeout >= PFTM_MAX &&
		    sp->timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
		break;
	}

	case DIOCGETSTATE: {
		/* Export a single state, looked up by (id, creatorid). */
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pfsync_state_export(&ps->state, s);
		break;
	}
1601 
	case DIOCGETSTATES: {
		/*
		 * Copy out up to ps_len bytes worth of states.  A zero
		 * ps_len is a size probe: report the space needed for the
		 * current state count without copying anything.
		 */
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		/* Kernel bounce buffer; copyout() goes via pstore. */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* Skip states already unlinked but not yet freed. */
			if (state->timeout != PFTM_UNLINKED) {
				/* Stop when the user buffer is full. */
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;
				pfsync_state_export(pstore, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP);
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* Report the number of bytes actually written. */
		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP);
		break;
	}
1640 
	case DIOCGETSTATUS: {
		/* Snapshot pf_status plus per-interface counters. */
		struct pf_status *s = (struct pf_status *)addr;
		bcopy(&pf_status, s, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		break;
	}

	case DIOCSETSTATUSIF: {
		/*
		 * Select the interface whose counters DIOCGETSTATUS
		 * reports; an empty name clears the selection.
		 */
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		/* Zero global counters and reset the since timestamp. */
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = time_second;
		if (*pf_status.ifname)
			pfi_update_status(pf_status.ifname, NULL);
		break;
	}
1668 
	case DIOCNATLOOK: {
		/*
		 * Look up the translated addresses/ports for an existing
		 * connection (used e.g. by ftp-proxy).  Exactly one matching
		 * state is required; E2BIG when ambiguous, ENOENT when none.
		 */
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/* proto and both addresses are mandatory; TCP/UDP also
		 * require both ports. */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			key.af = pnl->af;
			key.proto = pnl->proto;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			/* m counts how many states matched the key. */
			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
			} else
				error = ENOENT;
		}
		break;
	}
1711 
	case DIOCSETTIMEOUT: {
		/*
		 * Stage a new value for one PFTM_* timeout.  The value is
		 * written to pf_default_rule_new (presumably applied when
		 * the ruleset is committed — confirm against the commit
		 * path); the previous value is returned in pt->seconds.
		 */
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		/* A zero purge interval would disable purging; clamp to 1s. */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETTIMEOUT: {
		/* Read the currently active value of one PFTM_* timeout. */
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETLIMIT: {
		/* Read the current hard limit for one memory pool. */
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}
1748 
	case DIOCSETLIMIT: {
		/*
		 * Stage a new hard limit for one memory pool; rejected with
		 * EBUSY if more items are already in use than the new limit
		 * would allow.  The previous limit is returned in pl->limit.
		 */
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
		    pl->limit) {
			error = EBUSY;
			goto fail;
		}
		/* Staged in limit_new; presumably applied on commit. */
		pf_pool_limits[pl->index].limit_new = pl->limit;
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETDEBUG: {
		/* Set the global debug verbosity level. */
		u_int32_t	*level = (u_int32_t *)addr;

		pf_status.debug = *level;
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		/* Zero counters on every active filter rule in the main
		 * ruleset. */
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}
1787 
#ifdef ALTQ
	case DIOCSTARTALTQ: {
		/*
		 * Enable queueing on every interface-level altq entry
		 * (entries with an empty qname describe an interface
		 * discipline; named entries are individual queues).
		 */
		struct pf_altq		*altq;

		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 1;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		/* Mirror of DIOCSTARTALTQ: disable interface disciplines. */
		struct pf_altq		*altq;

		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 0;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQ: {
		/*
		 * Append an altq definition to the inactive list; requires
		 * the current inactive altq ticket.
		 */
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq, *a;

		if (pa->ticket != ticket_altqs_inactive) {
			error = EBUSY;
			break;
		}
		altq = pool_get(&pf_altq_pl, PR_WAITOK|PR_LIMITFAIL);
		if (altq == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, altq);
				break;
			}
			altq->altq_disc = NULL;
			/* Inherit altq_disc from the matching interface
			 * entry (empty qname) on the same ifname. */
			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		error = altq_add(altq);
		if (error) {
			pool_put(&pf_altq_pl, altq);
			break;
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
		/* Copy back so userland sees the assigned qid etc. */
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCGETALTQS: {
		/* Count active altq entries and return the active ticket. */
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;

		pa->nr = 0;
		TAILQ_FOREACH(altq, pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = ticket_altqs_active;
		break;
	}

	case DIOCGETALTQ: {
		/* Copy out the active altq entry at index pa->nr. */
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;

		if (pa->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}

	case DIOCCHANGEALTQ:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATS: {
		/* Fetch scheduler statistics for the queue at index pq->nr. */
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;
		int			 nbytes;

		if (pq->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		error = altq_getqstats(altq, pq->buf, &nbytes);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */
1937 
	case DIOCBEGINADDRS: {
		/*
		 * Start a fresh pool-address transaction: empty the staging
		 * buffer and hand out a new ticket that DIOCADDADDR and the
		 * rule ioctls must present.
		 */
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		/* Append one address to the staged pool (pf_pabuf). */
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		/* Only plain, dynamic-interface and table addresses are
		 * valid in a pool. */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK|PR_LIMITFAIL);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* Undo everything acquired above. */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}
1996 
	case DIOCGETADDRS: {
		/* Count the addresses in the pool of one rule. */
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		pp->nr = 0;
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		/* Copy out the pool address at index pp->nr. */
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		u_int32_t		 nr = 0;

		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
		/* Strip kernel pointers before handing to userland. */
		pf_addr_copyout(&pp->addr.addr);
		break;
	}
2035 
	case DIOCCHANGEADDR: {
		/*
		 * Add, insert or remove one address in the pool of an
		 * existing rule, analogous to DIOCCHANGERULE for rules.
		 * Afterwards the pool's round-robin cursor is reset to the
		 * head of the list.
		 */
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_ruleset	*ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			/* Build and validate the replacement pool entry. */
			newpa = pool_get(&pf_pooladdr_pl,
			    PR_WAITOK|PR_LIMITFAIL);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
#ifndef INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				/* Undo everything acquired above. */
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* Locate the insertion/removal point. */
		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int	i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		/* Reset the round-robin cursor and counter address. */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}
2146 
	case DIOCGETRULESETS: {
		/* Count the child anchors directly under pr->path. */
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		/* Force NUL termination of the user-supplied path. */
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		/* Return the name of the pr->nr'th child anchor of
		 * pr->path (indices as counted by DIOCGETRULESETS). */
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		/* Force NUL termination of the user-supplied path. */
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		/* Empty name means the index was out of range. */
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
2204 
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* No element array for this op, so esize must be 0. */
		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* Userland and kernel must agree on the element layout. */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		/* pfrio_size is in/out: capacity in, entries returned out. */
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* Stats entries are pfr_tstats, not plain pfr_table. */
		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		/* Applies set/clear flag masks to each listed table. */
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* No element array for this op, so esize must be 0. */
		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
2301 
	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* Element layout check guards every pfr_addr-based op. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Replace table contents; reports adds/deletes/changes. */
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* pfrio_size is in/out: capacity in, entries returned out. */
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* Stats entries are pfr_astats, not plain pfr_addr. */
		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Test which of the given addresses match the table. */
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Define an inactive table within an open transaction. */
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
2404 
2405 	case DIOCOSFPADD: {
2406 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2407 		error = pf_osfp_add(io);
2408 		break;
2409 	}
2410 
2411 	case DIOCOSFPGET: {
2412 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2413 		error = pf_osfp_get(io);
2414 		break;
2415 	}
2416 
	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		int			 i;

		/*
		 * Open a transaction: for every element of the user array
		 * start an inactive altq/table/ruleset and copy the
		 * resulting ticket back out to userland.
		 */
		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		/* Stage default-rule changes; applied in DIOCXCOMMIT. */
		pf_default_rule_new = pf_default_rule;
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				/* ALTQ transactions are global: no anchor. */
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			/* Hand the freshly issued ticket back to userland. */
			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}
2484 
	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		int			 i;

		/*
		 * Abort a transaction opened with DIOCXBEGIN: discard the
		 * inactive altq/table/ruleset named by each ticket.
		 */
		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP);
				free(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				/* ALTQ transactions are global: no anchor. */
				if (ioe->anchor[0]) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					free(table, M_TEMP);
					free(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP);
		free(ioe, M_TEMP);
		break;
	}
2545 
2546 	case DIOCXCOMMIT: {
2547 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2548 		struct pfioc_trans_e	*ioe;
2549 		struct pfr_table	*table;
2550 		struct pf_ruleset	*rs;
2551 		int			 i;
2552 
2553 		if (io->esize != sizeof(*ioe)) {
2554 			error = ENODEV;
2555 			goto fail;
2556 		}
2557 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2558 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2559 		/* first makes sure everything will succeed */
2560 		for (i = 0; i < io->size; i++) {
2561 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2562 				free(table, M_TEMP);
2563 				free(ioe, M_TEMP);
2564 				error = EFAULT;
2565 				goto fail;
2566 			}
2567 			switch (ioe->rs_num) {
2568 #ifdef ALTQ
2569 			case PF_RULESET_ALTQ:
2570 				if (ioe->anchor[0]) {
2571 					free(table, M_TEMP);
2572 					free(ioe, M_TEMP);
2573 					error = EINVAL;
2574 					goto fail;
2575 				}
2576 				if (!altqs_inactive_open || ioe->ticket !=
2577 				    ticket_altqs_inactive) {
2578 					free(table, M_TEMP);
2579 					free(ioe, M_TEMP);
2580 					error = EBUSY;
2581 					goto fail;
2582 				}
2583 				break;
2584 #endif /* ALTQ */
2585 			case PF_RULESET_TABLE:
2586 				rs = pf_find_ruleset(ioe->anchor);
2587 				if (rs == NULL || !rs->topen || ioe->ticket !=
2588 				     rs->tticket) {
2589 					free(table, M_TEMP);
2590 					free(ioe, M_TEMP);
2591 					error = EBUSY;
2592 					goto fail;
2593 				}
2594 				break;
2595 			default:
2596 				if (ioe->rs_num < 0 || ioe->rs_num >=
2597 				    PF_RULESET_MAX) {
2598 					free(table, M_TEMP);
2599 					free(ioe, M_TEMP);
2600 					error = EINVAL;
2601 					goto fail;
2602 				}
2603 				rs = pf_find_ruleset(ioe->anchor);
2604 				if (rs == NULL ||
2605 				    !rs->rules[ioe->rs_num].inactive.open ||
2606 				    rs->rules[ioe->rs_num].inactive.ticket !=
2607 				    ioe->ticket) {
2608 					free(table, M_TEMP);
2609 					free(ioe, M_TEMP);
2610 					error = EBUSY;
2611 					goto fail;
2612 				}
2613 				break;
2614 			}
2615 		}
2616 		/*
2617 		 * Checked already in DIOCSETLIMIT, but check again as the
2618 		 * situation might have changed.
2619 		 */
2620 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2621 			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2622 			    pf_pool_limits[i].limit_new) {
2623 				error = EBUSY;
2624 				goto fail;
2625 			}
2626 		}
2627 		/* now do the commit - no errors should happen here */
2628 		for (i = 0; i < io->size; i++) {
2629 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2630 				free(table, M_TEMP);
2631 				free(ioe, M_TEMP);
2632 				error = EFAULT;
2633 				goto fail;
2634 			}
2635 			switch (ioe->rs_num) {
2636 #ifdef ALTQ
2637 			case PF_RULESET_ALTQ:
2638 				if ((error = pf_commit_altq(ioe->ticket))) {
2639 					free(table, M_TEMP);
2640 					free(ioe, M_TEMP);
2641 					goto fail; /* really bad */
2642 				}
2643 				break;
2644 #endif /* ALTQ */
2645 			case PF_RULESET_TABLE:
2646 				bzero(table, sizeof(*table));
2647 				strlcpy(table->pfrt_anchor, ioe->anchor,
2648 				    sizeof(table->pfrt_anchor));
2649 				if ((error = pfr_ina_commit(table, ioe->ticket,
2650 				    NULL, NULL, 0))) {
2651 					free(table, M_TEMP);
2652 					free(ioe, M_TEMP);
2653 					goto fail; /* really bad */
2654 				}
2655 				break;
2656 			default:
2657 				if ((error = pf_commit_rules(ioe->ticket,
2658 				    ioe->rs_num, ioe->anchor))) {
2659 					free(table, M_TEMP);
2660 					free(ioe, M_TEMP);
2661 					goto fail; /* really bad */
2662 				}
2663 				break;
2664 			}
2665 		}
2666 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2667 			if (pf_pool_limits[i].limit_new !=
2668 			    pf_pool_limits[i].limit &&
2669 			    pool_sethardlimit(pf_pool_limits[i].pp,
2670 			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2671 				error = EBUSY;
2672 				goto fail; /* really bad */
2673 			}
2674 			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2675 		}
2676 		for (i = 0; i < PFTM_MAX; i++) {
2677 			int old = pf_default_rule.timeout[i];
2678 
2679 			pf_default_rule.timeout[i] =
2680 			    pf_default_rule_new.timeout[i];
2681 			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2682 			    pf_default_rule.timeout[i] < old)
2683 				wakeup(pf_purge_thread);
2684 		}
2685 		pfi_xcommit();
2686 		free(table, M_TEMP);
2687 		free(ioe, M_TEMP);
2688 		break;
2689 	}
2690 
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node	*n, *p, *pstore;
		u_int32_t		 nr = 0;
		int			 space = psn->psn_len;

		/* Size probe: psn_len == 0 means "how big a buffer?". */
		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		/* Bounce buffer: copyout works on a snapshot, not the
		 * live tree node. */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int	secs = time_second, diff;

			/* Stop once the user buffer is full. */
			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
				break;

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			/* Export relative times: age and remaining TTL. */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				free(pstore, M_TEMP);
				goto fail;
			}
			p++;
			nr++;
		}
		/* Report the number of bytes actually copied out. */
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		free(pstore, M_TEMP);
		break;
	}
2744 
	case DIOCCLRSRCNODES: {
		struct pf_src_node	*n;
		struct pf_state		*state;

		/* Detach every state from its source nodes first... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ...then mark all nodes expired and purge them now. */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes(1);
		pf_status.src_nodes = 0;
		break;
	}
2761 
	case DIOCKILLSRCNODES: {
		struct pf_src_node	*sn;
		struct pf_state		*s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		u_int			killed = 0;

		/*
		 * Expire every source node whose source and redirect
		 * addresses both match the user-supplied (possibly
		 * negated) address/mask pairs.
		 */
		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
				&psnk->psnk_src.addr.v.a.addr,
				&psnk->psnk_src.addr.v.a.mask,
				&sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
				&psnk->psnk_dst.addr.v.a.addr,
				&psnk->psnk_dst.addr.v.a.mask,
				&sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				/* expire = 1 marks the node for purging */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes(1);

		psnk->psnk_killed = killed;
		break;
	}
2800 
2801 	case DIOCSETHOSTID: {
2802 		u_int32_t	*hostid = (u_int32_t *)addr;
2803 
2804 		if (*hostid == 0)
2805 			pf_status.hostid = arc4random();
2806 		else
2807 			pf_status.hostid = *hostid;
2808 		break;
2809 	}
2810 
	case DIOCOSFPFLUSH:
		/* Drop all passive OS fingerprint entries. */
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		/* Userland and kernel must agree on the pfi_kif layout. */
		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		/* Set per-interface flags on interfaces matching the name. */
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		/* Clear per-interface flags; counterpart of DIOCSETIFFLAG. */
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}
2840 
2841 	case DIOCSETREASS: {
2842 		u_int32_t	*reass = (u_int32_t *)addr;
2843 
2844 		pf_status.reass = *reass;
2845 		if (!(pf_status.reass & PF_REASS_ENABLED))
2846 			pf_status.reass = 0;
2847 		break;
2848 	}
2849 
	default:
		/* Unknown ioctl command. */
		error = ENODEV;
		break;
	}
fail:
	/* Common exit: restore spl and release the consistency lock in
	 * the same (read vs. write) mode it was acquired for this call. */
	splx(s);
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
	return (error);
}
2862