xref: /openbsd-src/sys/net/pf_ioctl.c (revision 4c1e55dc91edd6e69ccc60ce855900fbc12cf34f)
1 /*	$OpenBSD: pf_ioctl.c,v 1.253 2012/07/08 07:58:09 henning Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/kernel.h>
49 #include <sys/time.h>
50 #include <sys/timeout.h>
51 #include <sys/pool.h>
52 #include <sys/proc.h>
53 #include <sys/malloc.h>
54 #include <sys/kthread.h>
55 #include <sys/rwlock.h>
56 #include <sys/syslog.h>
57 #include <uvm/uvm_extern.h>
58 
59 #include <net/if.h>
60 #include <net/if_types.h>
61 #include <net/route.h>
62 
63 #include <netinet/in.h>
64 #include <netinet/in_var.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/ip.h>
67 #include <netinet/ip_var.h>
68 #include <netinet/ip_icmp.h>
69 
70 #include <dev/rndvar.h>
71 #include <crypto/md5.h>
72 #include <net/pfvar.h>
73 
74 #if NPFSYNC > 0
75 #include <net/if_pfsync.h>
76 #endif /* NPFSYNC > 0 */
77 
78 #if NPFLOG > 0
79 #include <net/if_pflog.h>
80 #endif /* NPFLOG > 0 */
81 
82 #ifdef INET6
83 #include <netinet/ip6.h>
84 #include <netinet/in_pcb.h>
85 #endif /* INET6 */
86 
87 #ifdef ALTQ
88 #include <altq/altq.h>
89 #endif
90 
91 void			 pfattach(int);
92 void			 pf_thread_create(void *);
93 int			 pfopen(dev_t, int, int, struct proc *);
94 int			 pfclose(dev_t, int, int, struct proc *);
95 int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
96 #ifdef ALTQ
97 int			 pf_begin_altq(u_int32_t *);
98 int			 pf_rollback_altq(u_int32_t);
99 int			 pf_commit_altq(u_int32_t);
100 int			 pf_enable_altq(struct pf_altq *);
101 int			 pf_disable_altq(struct pf_altq *);
102 #endif /* ALTQ */
103 int			 pf_begin_rules(u_int32_t *, const char *);
104 int			 pf_rollback_rules(u_int32_t, char *);
105 int			 pf_setup_pfsync_matching(struct pf_ruleset *);
106 void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
107 void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
108 int			 pf_commit_rules(u_int32_t, char *);
109 int			 pf_addr_setup(struct pf_ruleset *,
110 			    struct pf_addr_wrap *, sa_family_t);
111 int			 pf_kif_setup(char *, struct pfi_kif **);
112 void			 pf_addr_copyout(struct pf_addr_wrap *);
113 void			 pf_trans_set_commit(void);
114 void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
115 int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
116 			    struct pf_ruleset *);
117 u_int32_t		 pf_oqname2qid(char *);
118 void			 pf_oqid2qname(u_int32_t, char *);
119 void			 pf_oqid_unref(u_int32_t);
120 
121 struct pf_rule		 pf_default_rule, pf_default_rule_new;
122 struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
123 #ifdef ALTQ
124 static int		 pf_altq_running;
125 #endif
126 
127 struct {
128 	char		statusif[IFNAMSIZ];
129 	u_int32_t	debug;
130 	u_int32_t	hostid;
131 	u_int32_t	reass;
132 	u_int32_t	mask;
133 } pf_trans_set;
134 
135 #define	PF_TSET_STATUSIF	0x01
136 #define	PF_TSET_DEBUG		0x02
137 #define	PF_TSET_HOSTID		0x04
138 #define	PF_TSET_REASS		0x08
139 
140 #define	TAGID_MAX	 50000
141 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
142 				pf_oqids = TAILQ_HEAD_INITIALIZER(pf_oqids);
143 
144 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
145 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
146 #endif
147 u_int16_t		 tagname2tag(struct pf_tags *, char *);
148 void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
149 void			 tag_unref(struct pf_tags *, u_int16_t);
150 int			 pf_rtlabel_add(struct pf_addr_wrap *);
151 void			 pf_rtlabel_remove(struct pf_addr_wrap *);
152 void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
153 
154 
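/*
 * pfattach() runs once at pseudo-device attach time: it sets up pf's
 * memory pools, the default rule and its timeouts, the anchor/ruleset
 * trees and the global status, then defers creation of the state purge
 * thread until the kernel is able to fork kthreads.
 */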
155 void
156 pfattach(int num)
157 {
158 	u_int32_t *timeout = pf_default_rule.timeout;
159 
160 	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
161 	    &pool_allocator_nointr);
162 	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
163 	    "pfsrctrpl", NULL);
164 	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0, 0, 0,
165 	    "pfsnitempl", NULL);
166 	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
167 	    NULL);
168 	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
169 	    "pfstatekeypl", NULL);
170 	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
171 	    "pfstateitempl", NULL);
172 	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 0, 0,
173 	    "pfruleitempl", NULL);
174 	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
175 	    &pool_allocator_nointr);
176 	pfr_initialize();
177 	pfi_initialize();
178 	pf_osfp_initialize();
179 
180 	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
181 	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
182 
183 	if (physmem <= atop(100*1024*1024))
184 		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
185 		    PFR_KENTRY_HIWAT_SMALL;
186 
187 	RB_INIT(&tree_src_tracking);
188 	RB_INIT(&pf_anchors);
189 	pf_init_ruleset(&pf_main_ruleset);
190 	TAILQ_INIT(&pf_altqs[0]);
191 	TAILQ_INIT(&pf_altqs[1]);
192 	pf_altqs_active = &pf_altqs[0];
193 	pf_altqs_inactive = &pf_altqs[1];
194 	TAILQ_INIT(&state_list);
195 
196 	/* default rule should never be garbage collected */
197 	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
198 	pf_default_rule.action = PF_PASS;
199 	pf_default_rule.nr = -1;
200 	pf_default_rule.rtableid = -1;
201 
202 	/* initialize default timeouts */
203 	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
204 	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
205 	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
206 	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
207 	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
208 	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
209 	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
210 	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
211 	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
212 	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
213 	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
214 	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
215 	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
216 	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
217 	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
218 	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
219 	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
220 	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
221 	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
222 	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
223 
224 	pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
225 	pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
226 	pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
227 	pf_default_rule.nat.addr.type = PF_ADDR_NONE;
228 	pf_default_rule.route.addr.type = PF_ADDR_NONE;
229 
230 	pf_normalize_init();
231 	bzero(&pf_status, sizeof(pf_status));
232 	pf_status.debug = LOG_ERR;
233 	pf_status.reass = PF_REASS_ENABLED;
234 
235 	/* XXX do our best to avoid a conflict */
236 	pf_status.hostid = arc4random();
237 
238 	/* require process context to purge states, so perform in a thread */
239 	kthread_create_deferred(pf_thread_create, NULL);
240 }
241 
242 void
243 pf_thread_create(void *v)
244 {
245 	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
246 		panic("pfpurge thread");
247 }
248 
249 int
250 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
251 {
252 	if (minor(dev) >= 1)
253 		return (ENXIO);
254 	return (0);
255 }
256 
257 int
258 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
259 {
260 	if (minor(dev) >= 1)
261 		return (ENXIO);
262 	return (0);
263 }
264 
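/*
 * pf_rm_rule() detaches a rule from the given queue (if any) and, once
 * no states or source nodes reference it and it is off every queue,
 * drops its tag, queue, table, interface and anchor references and
 * returns it to the rule pool.
 */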
265 void
266 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
267 {
268 	if (rulequeue != NULL) {
269 		if (rule->states_cur <= 0 && rule->src_nodes <= 0) {
270 			/*
271 			 * XXX - we need to remove the table *before* detaching
272 			 * the rule to make sure the table code does not delete
273 			 * the anchor under our feet.
274 			 */
275 			pf_tbladdr_remove(&rule->src.addr);
276 			pf_tbladdr_remove(&rule->dst.addr);
277 			pf_tbladdr_remove(&rule->rdr.addr);
278 			pf_tbladdr_remove(&rule->nat.addr);
279 			pf_tbladdr_remove(&rule->route.addr);
280 			if (rule->overload_tbl)
281 				pfr_detach_table(rule->overload_tbl);
282 		}
283 		TAILQ_REMOVE(rulequeue, rule, entries);
284 		rule->entries.tqe_prev = NULL;
285 		rule->nr = -1;
286 	}
287 
288 	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
289 	    rule->entries.tqe_prev != NULL)
290 		return;
291 	pf_tag_unref(rule->tag);
292 	pf_tag_unref(rule->match_tag);
293 #ifdef ALTQ
294 	if (rule->pqid != rule->qid)
295 		pf_oqid_unref(rule->pqid);
296 	pf_oqid_unref(rule->qid);
297 #endif
298 	pf_rtlabel_remove(&rule->src.addr);
299 	pf_rtlabel_remove(&rule->dst.addr);
300 	pfi_dynaddr_remove(&rule->src.addr);
301 	pfi_dynaddr_remove(&rule->dst.addr);
302 	pfi_dynaddr_remove(&rule->rdr.addr);
303 	pfi_dynaddr_remove(&rule->nat.addr);
304 	pfi_dynaddr_remove(&rule->route.addr);
305 	if (rulequeue == NULL) {
306 		pf_tbladdr_remove(&rule->src.addr);
307 		pf_tbladdr_remove(&rule->dst.addr);
308 		pf_tbladdr_remove(&rule->rdr.addr);
309 		pf_tbladdr_remove(&rule->nat.addr);
310 		pf_tbladdr_remove(&rule->route.addr);
311 		if (rule->overload_tbl)
312 			pfr_detach_table(rule->overload_tbl);
313 	}
314 	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
315 	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
316 	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
317 	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
318 	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
319 	pf_anchor_remove(rule);
320 	pool_put(&pf_rule_pl, rule);
321 }
322 
323 void
324 pf_purge_rule(struct pf_ruleset *ruleset, struct pf_rule *rule)
325 {
326 	u_int32_t		 nr;
327 
328 	pf_rm_rule(ruleset->rules.active.ptr, rule);
329 	ruleset->rules.active.rcount--;
330 
331 	nr = 0;
332 	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
333 		rule->nr = nr++;
334 
335 	ruleset->rules.active.ticket++;
336 
337 	pf_calc_skip_steps(ruleset->rules.active.ptr);
338 	pf_remove_if_empty_ruleset(ruleset);
339 }
340 
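/*
 * tagname2tag() returns a reference-counted numeric id (1..TAGID_MAX)
 * for the given tag name, allocating the lowest free id for names not
 * seen before; 0 means no id could be assigned.
 */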
341 u_int16_t
342 tagname2tag(struct pf_tags *head, char *tagname)
343 {
344 	struct pf_tagname	*tag, *p = NULL;
345 	u_int16_t		 new_tagid = 1;
346 
347 	TAILQ_FOREACH(tag, head, entries)
348 		if (strcmp(tagname, tag->name) == 0) {
349 			tag->ref++;
350 			return (tag->tag);
351 		}
352 
353 	/*
354 	 * to avoid fragmentation, we do a linear search from the beginning
355 	 * and take the first free slot we find. if there is none or the list
356 	 * is empty, append a new entry at the end.
357 	 */
358 
359 	/* find the lowest free tag id */
360 	if (!TAILQ_EMPTY(head))
361 		for (p = TAILQ_FIRST(head); p != NULL &&
362 		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
363 			new_tagid = p->tag + 1;
364 
365 	if (new_tagid > TAGID_MAX)
366 		return (0);
367 
368 	/* allocate and fill new struct pf_tagname */
369 	tag = malloc(sizeof(*tag), M_TEMP, M_NOWAIT|M_ZERO);
370 	if (tag == NULL)
371 		return (0);
372 	strlcpy(tag->name, tagname, sizeof(tag->name));
373 	tag->tag = new_tagid;
374 	tag->ref++;
375 
376 	if (p != NULL)	/* insert new entry before p */
377 		TAILQ_INSERT_BEFORE(p, tag, entries);
378 	else	/* either list empty or no free slot in between */
379 		TAILQ_INSERT_TAIL(head, tag, entries);
380 
381 	return (tag->tag);
382 }
383 
384 void
385 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
386 {
387 	struct pf_tagname	*tag;
388 
389 	TAILQ_FOREACH(tag, head, entries)
390 		if (tag->tag == tagid) {
391 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
392 			return;
393 		}
394 }
395 
396 void
397 tag_unref(struct pf_tags *head, u_int16_t tag)
398 {
399 	struct pf_tagname	*p, *next;
400 
401 	if (tag == 0)
402 		return;
403 
404 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
405 		next = TAILQ_NEXT(p, entries);
406 		if (tag == p->tag) {
407 			if (--p->ref == 0) {
408 				TAILQ_REMOVE(head, p, entries);
409 				free(p, M_TEMP);
410 			}
411 			break;
412 		}
413 	}
414 }
415 
416 u_int16_t
417 pf_tagname2tag(char *tagname)
418 {
419 	return (tagname2tag(&pf_tags, tagname));
420 }
421 
422 void
423 pf_tag2tagname(u_int16_t tagid, char *p)
424 {
425 	tag2tagname(&pf_tags, tagid, p);
426 }
427 
428 void
429 pf_tag_ref(u_int16_t tag)
430 {
431 	struct pf_tagname *t;
432 
433 	TAILQ_FOREACH(t, &pf_tags, entries)
434 		if (t->tag == tag)
435 			break;
436 	if (t != NULL)
437 		t->ref++;
438 }
439 
440 void
441 pf_tag_unref(u_int16_t tag)
442 {
443 	tag_unref(&pf_tags, tag);
444 }
445 
446 int
447 pf_rtlabel_add(struct pf_addr_wrap *a)
448 {
449 	if (a->type == PF_ADDR_RTLABEL &&
450 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
451 		return (-1);
452 	return (0);
453 }
454 
455 void
456 pf_rtlabel_remove(struct pf_addr_wrap *a)
457 {
458 	if (a->type == PF_ADDR_RTLABEL)
459 		rtlabel_unref(a->v.rtlabel);
460 }
461 
462 void
463 pf_rtlabel_copyout(struct pf_addr_wrap *a)
464 {
465 	const char	*name;
466 
467 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
468 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
469 			strlcpy(a->v.rtlabelname, "?",
470 			    sizeof(a->v.rtlabelname));
471 		else
472 			strlcpy(a->v.rtlabelname, name,
473 			    sizeof(a->v.rtlabelname));
474 	}
475 }
476 
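/*
 * Old-style ALTQ queue names are mapped to 16-bit queue ids with the
 * same reference-counted tag machinery used for packet tags, just on a
 * separate list (pf_oqids).
 */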
477 #ifdef ALTQ
478 u_int32_t
479 pf_oqname2qid(char *qname)
480 {
481 	return ((u_int32_t)tagname2tag(&pf_oqids, qname));
482 }
483 
484 void
485 pf_oqid2qname(u_int32_t qid, char *p)
486 {
487 	tag2tagname(&pf_oqids, (u_int16_t)qid, p);
488 }
489 
490 void
491 pf_oqid_unref(u_int32_t qid)
492 {
493 	tag_unref(&pf_oqids, (u_int16_t)qid);
494 }
495 
496 int
497 pf_begin_altq(u_int32_t *ticket)
498 {
499 	struct pf_altq	*altq;
500 	int		 error = 0;
501 
502 	/* Purge the old altq list */
503 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
504 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
505 		if (altq->qname[0] == 0) {
506 			/* detach and destroy the discipline */
507 			error = altq_remove(altq);
508 		} else
509 			pf_oqid_unref(altq->qid);
510 		pool_put(&pf_altq_pl, altq);
511 	}
512 	if (error)
513 		return (error);
514 	*ticket = ++ticket_altqs_inactive;
515 	altqs_inactive_open = 1;
516 	return (0);
517 }
518 
519 int
520 pf_rollback_altq(u_int32_t ticket)
521 {
522 	struct pf_altq	*altq;
523 	int		 error = 0;
524 
525 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
526 		return (0);
527 	/* Purge the old altq list */
528 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
529 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
530 		if (altq->qname[0] == 0) {
531 			/* detach and destroy the discipline */
532 			error = altq_remove(altq);
533 		} else
534 			pf_oqid_unref(altq->qid);
535 		pool_put(&pf_altq_pl, altq);
536 	}
537 	altqs_inactive_open = 0;
538 	return (error);
539 }
540 
541 int
542 pf_commit_altq(u_int32_t ticket)
543 {
544 	struct pf_altqqueue	*old_altqs;
545 	struct pf_altq		*altq;
546 	int			 s, err, error = 0;
547 
548 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
549 		return (EBUSY);
550 
551 	/* swap altqs, keep the old. */
552 	s = splsoftnet();
553 	old_altqs = pf_altqs_active;
554 	pf_altqs_active = pf_altqs_inactive;
555 	pf_altqs_inactive = old_altqs;
556 	ticket_altqs_active = ticket_altqs_inactive;
557 
558 	/* Attach new disciplines */
559 	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
560 		if (altq->qname[0] == 0) {
561 			/* attach the discipline */
562 			error = altq_pfattach(altq);
563 			if (error == 0 && pf_altq_running)
564 				error = pf_enable_altq(altq);
565 			if (error != 0) {
566 				splx(s);
567 				return (error);
568 			}
569 		}
570 	}
571 
572 	/* Purge the old altq list */
573 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
574 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
575 		if (altq->qname[0] == 0) {
576 			/* detach and destroy the discipline */
577 			if (pf_altq_running)
578 				error = pf_disable_altq(altq);
579 			err = altq_pfdetach(altq);
580 			if (err != 0 && error == 0)
581 				error = err;
582 			err = altq_remove(altq);
583 			if (err != 0 && error == 0)
584 				error = err;
585 		} else
586 			pf_oqid_unref(altq->qid);
587 		pool_put(&pf_altq_pl, altq);
588 	}
589 	splx(s);
590 
591 	altqs_inactive_open = 0;
592 	return (error);
593 }
594 
595 int
596 pf_enable_altq(struct pf_altq *altq)
597 {
598 	struct ifnet		*ifp;
599 	struct oldtb_profile	 tb;
600 	int			 s, error = 0;
601 
602 	if ((ifp = ifunit(altq->ifname)) == NULL)
603 		return (EINVAL);
604 
605 	if (ifp->if_snd.altq_type != ALTQT_NONE)
606 		error = altq_enable(&ifp->if_snd);
607 
608 	/* set tokenbucket regulator */
609 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
610 		tb.rate = altq->ifbandwidth;
611 		tb.depth = altq->tbrsize;
612 		s = splnet();
613 		error = oldtbr_set(&ifp->if_snd, &tb);
614 		splx(s);
615 	}
616 
617 	return (error);
618 }
619 
620 int
621 pf_disable_altq(struct pf_altq *altq)
622 {
623 	struct ifnet		*ifp;
624 	struct oldtb_profile	 tb;
625 	int			 s, error;
626 
627 	if ((ifp = ifunit(altq->ifname)) == NULL)
628 		return (EINVAL);
629 
630 	/*
631 	 * if the interface no longer references this discipline, it has
632 	 * been overridden by a new one; nothing to disable, just return.
633 	 */
634 	if (altq->altq_disc != ifp->if_snd.altq_disc)
635 		return (0);
636 
637 	error = altq_disable(&ifp->if_snd);
638 
639 	if (error == 0) {
640 		/* clear tokenbucket regulator */
641 		tb.rate = 0;
642 		s = splnet();
643 		error = oldtbr_set(&ifp->if_snd, &tb);
644 		splx(s);
645 	}
646 
647 	return (error);
648 }
649 #endif /* ALTQ */
650 
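/*
 * pf_begin_rules() flushes the inactive ruleset of the given anchor and
 * returns a ticket that must accompany the rules added afterwards;
 * pf_rollback_rules() discards such an open transaction again if the
 * ticket still matches.
 */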
651 int
652 pf_begin_rules(u_int32_t *ticket, const char *anchor)
653 {
654 	struct pf_ruleset	*rs;
655 	struct pf_rule		*rule;
656 
657 	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
658 		return (EINVAL);
659 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
660 		pf_rm_rule(rs->rules.inactive.ptr, rule);
661 		rs->rules.inactive.rcount--;
662 	}
663 	*ticket = ++rs->rules.inactive.ticket;
664 	rs->rules.inactive.open = 1;
665 	return (0);
666 }
667 
668 int
669 pf_rollback_rules(u_int32_t ticket, char *anchor)
670 {
671 	struct pf_ruleset	*rs;
672 	struct pf_rule		*rule;
673 
674 	rs = pf_find_ruleset(anchor);
675 	if (rs == NULL || !rs->rules.inactive.open ||
676 	    rs->rules.inactive.ticket != ticket)
677 		return (0);
678 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
679 		pf_rm_rule(rs->rules.inactive.ptr, rule);
680 		rs->rules.inactive.rcount--;
681 	}
682 	rs->rules.inactive.open = 0;
683 	return (0);
684 }
685 
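/*
 * Helpers that feed individual rule fields into an MD5 context; multi-byte
 * integers are converted to network byte order first so the resulting
 * checksum is independent of host endianness.
 */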
686 #define PF_MD5_UPD(st, elm)						\
687 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
688 
689 #define PF_MD5_UPD_STR(st, elm)						\
690 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
691 
692 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
693 		(stor) = htonl((st)->elm);				\
694 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
695 } while (0)
696 
697 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
698 		(stor) = htons((st)->elm);				\
699 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
700 } while (0)
701 
702 void
703 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
704 {
705 	PF_MD5_UPD(pfr, addr.type);
706 	switch (pfr->addr.type) {
707 		case PF_ADDR_DYNIFTL:
708 			PF_MD5_UPD(pfr, addr.v.ifname);
709 			PF_MD5_UPD(pfr, addr.iflags);
710 			break;
711 		case PF_ADDR_TABLE:
712 			PF_MD5_UPD(pfr, addr.v.tblname);
713 			break;
714 		case PF_ADDR_ADDRMASK:
715 			/* XXX ignore af? */
716 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
717 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
718 			break;
719 		case PF_ADDR_RTLABEL:
720 			PF_MD5_UPD(pfr, addr.v.rtlabelname);
721 			break;
722 	}
723 
724 	PF_MD5_UPD(pfr, port[0]);
725 	PF_MD5_UPD(pfr, port[1]);
726 	PF_MD5_UPD(pfr, neg);
727 	PF_MD5_UPD(pfr, port_op);
728 }
729 
730 void
731 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
732 {
733 	u_int16_t x;
734 	u_int32_t y;
735 
736 	pf_hash_rule_addr(ctx, &rule->src);
737 	pf_hash_rule_addr(ctx, &rule->dst);
738 	PF_MD5_UPD_STR(rule, label);
739 	PF_MD5_UPD_STR(rule, ifname);
740 	PF_MD5_UPD_STR(rule, rcv_ifname);
741 	PF_MD5_UPD_STR(rule, match_tagname);
742 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
743 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
744 	PF_MD5_UPD_HTONL(rule, prob, y);
745 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
746 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
747 	PF_MD5_UPD(rule, uid.op);
748 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
749 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
750 	PF_MD5_UPD(rule, gid.op);
751 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
752 	PF_MD5_UPD(rule, action);
753 	PF_MD5_UPD(rule, direction);
754 	PF_MD5_UPD(rule, af);
755 	PF_MD5_UPD(rule, quick);
756 	PF_MD5_UPD(rule, ifnot);
757 	PF_MD5_UPD(rule, match_tag_not);
758 	PF_MD5_UPD(rule, keep_state);
759 	PF_MD5_UPD(rule, proto);
760 	PF_MD5_UPD(rule, type);
761 	PF_MD5_UPD(rule, code);
762 	PF_MD5_UPD(rule, flags);
763 	PF_MD5_UPD(rule, flagset);
764 	PF_MD5_UPD(rule, allow_opts);
765 	PF_MD5_UPD(rule, rt);
766 	PF_MD5_UPD(rule, tos);
767 }
768 
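/*
 * pf_commit_rules() atomically swaps the inactive ruleset in for the
 * active one at splsoftnet(), recalculates the skip steps and then
 * frees the rules of the previously active (now inactive) list.
 */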
769 int
770 pf_commit_rules(u_int32_t ticket, char *anchor)
771 {
772 	struct pf_ruleset	*rs;
773 	struct pf_rule		*rule, **old_array;
774 	struct pf_rulequeue	*old_rules;
775 	int			 s, error;
776 	u_int32_t		 old_rcount;
777 
778 	rs = pf_find_ruleset(anchor);
779 	if (rs == NULL || !rs->rules.inactive.open ||
780 	    ticket != rs->rules.inactive.ticket)
781 		return (EBUSY);
782 
783 	/* Calculate checksum for the main ruleset */
784 	if (rs == &pf_main_ruleset) {
785 		error = pf_setup_pfsync_matching(rs);
786 		if (error != 0)
787 			return (error);
788 	}
789 
790 	/* Swap rules, keep the old. */
791 	s = splsoftnet();
792 	old_rules = rs->rules.active.ptr;
793 	old_rcount = rs->rules.active.rcount;
794 	old_array = rs->rules.active.ptr_array;
795 
796 	rs->rules.active.ptr = rs->rules.inactive.ptr;
797 	rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
798 	rs->rules.active.rcount = rs->rules.inactive.rcount;
799 	rs->rules.inactive.ptr = old_rules;
800 	rs->rules.inactive.ptr_array = old_array;
801 	rs->rules.inactive.rcount = old_rcount;
802 
803 	rs->rules.active.ticket = rs->rules.inactive.ticket;
804 	pf_calc_skip_steps(rs->rules.active.ptr);
805 
806 
807 	/* Purge the old rule list. */
808 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
809 		pf_rm_rule(old_rules, rule);
810 	if (rs->rules.inactive.ptr_array)
811 		free(rs->rules.inactive.ptr_array, M_TEMP);
812 	rs->rules.inactive.ptr_array = NULL;
813 	rs->rules.inactive.rcount = 0;
814 	rs->rules.inactive.open = 0;
815 	pf_remove_if_empty_ruleset(rs);
816 	splx(s);
817 	return (0);
818 }
819 
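/*
 * pf_setup_pfsync_matching() hashes every rule of the inactive main
 * ruleset into an MD5 digest (and builds the ptr_array index while at
 * it), storing the result in pf_status.pf_chksum so pfsync peers can
 * detect mismatched rulesets.
 */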
820 int
821 pf_setup_pfsync_matching(struct pf_ruleset *rs)
822 {
823 	MD5_CTX			 ctx;
824 	struct pf_rule		*rule;
825 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
826 
827 	MD5Init(&ctx);
828 	if (rs->rules.inactive.ptr_array)
829 		free(rs->rules.inactive.ptr_array, M_TEMP);
830 	rs->rules.inactive.ptr_array = NULL;
831 
832 	if (rs->rules.inactive.rcount) {
833 		rs->rules.inactive.ptr_array = malloc(sizeof(caddr_t) *
834 		    rs->rules.inactive.rcount,  M_TEMP, M_NOWAIT);
835 
836 		if (!rs->rules.inactive.ptr_array)
837 			return (ENOMEM);
838 
839 		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
840 			pf_hash_rule(&ctx, rule);
841 			(rs->rules.inactive.ptr_array)[rule->nr] = rule;
842 		}
843 	}
844 
845 	MD5Final(digest, &ctx);
846 	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
847 	return (0);
848 }
849 
850 int
851 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
852     sa_family_t af)
853 {
854 	if (pfi_dynaddr_setup(addr, af) ||
855 	    pf_tbladdr_setup(ruleset, addr) ||
856 	    pf_rtlabel_add(addr))
857 		return (EINVAL);
858 
859 	return (0);
860 }
861 
862 int
863 pf_kif_setup(char *ifname, struct pfi_kif **kif)
864 {
865 	if (ifname[0]) {
866 		*kif = pfi_kif_get(ifname);
867 		if (*kif == NULL)
868 			return (EINVAL);
869 
870 		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
871 	} else
872 		*kif = NULL;
873 
874 	return (0);
875 }
876 
877 void
878 pf_addr_copyout(struct pf_addr_wrap *addr)
879 {
880 	pfi_dynaddr_copyout(addr);
881 	pf_tbladdr_copyout(addr);
882 	pf_rtlabel_copyout(addr);
883 }
884 
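/*
 * pfioctl() is the ioctl entry point for /dev/pf.  At securelevel > 1
 * only a subset of commands is permitted, read-only descriptors are
 * limited to query commands, and the request is then dispatched while
 * holding pf_consistency_lock at splsoftnet().
 *
 * Illustrative userland sketch (not part of this file; needs
 * <fcntl.h>, <sys/ioctl.h>, <sys/socket.h>, <net/if.h>, <net/pfvar.h>):
 *
 *	struct pf_status st;
 *	int fd = open("/dev/pf", O_RDONLY);
 *
 *	if (fd != -1 && ioctl(fd, DIOCGETSTATUS, &st) == 0)
 *		printf("states: %u\n", st.states);
 *
 * Commands that change state (e.g. DIOCSTART, DIOCADDRULE) also require
 * the descriptor to have been opened for writing (FWRITE).
 */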
885 int
886 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
887 {
888 	int			 s;
889 	int			 error = 0;
890 
891 	/* XXX keep in sync with switch() below */
892 	if (securelevel > 1)
893 		switch (cmd) {
894 		case DIOCGETRULES:
895 		case DIOCGETRULE:
896 		case DIOCGETSTATE:
897 		case DIOCSETSTATUSIF:
898 		case DIOCGETSTATUS:
899 		case DIOCCLRSTATUS:
900 		case DIOCNATLOOK:
901 		case DIOCSETDEBUG:
902 		case DIOCGETSTATES:
903 		case DIOCGETTIMEOUT:
904 		case DIOCCLRRULECTRS:
905 		case DIOCGETLIMIT:
906 		case DIOCGETALTQS:
907 		case DIOCGETALTQ:
908 		case DIOCGETQSTATS:
909 		case DIOCGETRULESETS:
910 		case DIOCGETRULESET:
911 		case DIOCRGETTABLES:
912 		case DIOCRGETTSTATS:
913 		case DIOCRCLRTSTATS:
914 		case DIOCRCLRADDRS:
915 		case DIOCRADDADDRS:
916 		case DIOCRDELADDRS:
917 		case DIOCRSETADDRS:
918 		case DIOCRGETASTATS:
919 		case DIOCRCLRASTATS:
920 		case DIOCRTSTADDRS:
921 		case DIOCOSFPGET:
922 		case DIOCGETSRCNODES:
923 		case DIOCCLRSRCNODES:
924 		case DIOCIGETIFACES:
925 		case DIOCSETIFFLAG:
926 		case DIOCCLRIFFLAG:
927 			break;
928 		case DIOCRCLRTABLES:
929 		case DIOCRADDTABLES:
930 		case DIOCRDELTABLES:
931 		case DIOCRSETTFLAGS:
932 			if (((struct pfioc_table *)addr)->pfrio_flags &
933 			    PFR_FLAG_DUMMY)
934 				break; /* dummy operation ok */
935 			return (EPERM);
936 		default:
937 			return (EPERM);
938 		}
939 
940 	if (!(flags & FWRITE))
941 		switch (cmd) {
942 		case DIOCGETRULES:
943 		case DIOCGETSTATE:
944 		case DIOCGETSTATUS:
945 		case DIOCGETSTATES:
946 		case DIOCGETTIMEOUT:
947 		case DIOCGETLIMIT:
948 		case DIOCGETALTQS:
949 		case DIOCGETALTQ:
950 		case DIOCGETQSTATS:
951 		case DIOCGETRULESETS:
952 		case DIOCGETRULESET:
953 		case DIOCNATLOOK:
954 		case DIOCRGETTABLES:
955 		case DIOCRGETTSTATS:
956 		case DIOCRGETADDRS:
957 		case DIOCRGETASTATS:
958 		case DIOCRTSTADDRS:
959 		case DIOCOSFPGET:
960 		case DIOCGETSRCNODES:
961 		case DIOCIGETIFACES:
962 			break;
963 		case DIOCRCLRTABLES:
964 		case DIOCRADDTABLES:
965 		case DIOCRDELTABLES:
966 		case DIOCRCLRTSTATS:
967 		case DIOCRCLRADDRS:
968 		case DIOCRADDADDRS:
969 		case DIOCRDELADDRS:
970 		case DIOCRSETADDRS:
971 		case DIOCRSETTFLAGS:
972 			if (((struct pfioc_table *)addr)->pfrio_flags &
973 			    PFR_FLAG_DUMMY) {
974 				flags |= FWRITE; /* need write lock for dummy */
975 				break; /* dummy operation ok */
976 			}
977 			return (EACCES);
978 		case DIOCGETRULE:
979 			if (((struct pfioc_rule *)addr)->action ==
980 			    PF_GET_CLR_CNTR)
981 				return (EACCES);
982 			break;
983 		default:
984 			return (EACCES);
985 		}
986 
987 	if (flags & FWRITE)
988 		rw_enter_write(&pf_consistency_lock);
989 	else
990 		rw_enter_read(&pf_consistency_lock);
991 
992 	s = splsoftnet();
993 	switch (cmd) {
994 
995 	case DIOCSTART:
996 		if (pf_status.running)
997 			error = EEXIST;
998 		else {
999 			pf_status.running = 1;
1000 			pf_status.since = time_second;
1001 			if (pf_status.stateid == 0) {
1002 				pf_status.stateid = time_second;
1003 				pf_status.stateid = pf_status.stateid << 32;
1004 			}
1005 			DPFPRINTF(LOG_NOTICE, "pf: started");
1006 		}
1007 		break;
1008 
1009 	case DIOCSTOP:
1010 		if (!pf_status.running)
1011 			error = ENOENT;
1012 		else {
1013 			pf_status.running = 0;
1014 			pf_status.since = time_second;
1015 			DPFPRINTF(LOG_NOTICE, "pf: stopped");
1016 		}
1017 		break;
1018 
1019 	case DIOCADDRULE: {
1020 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1021 		struct pf_ruleset	*ruleset;
1022 		struct pf_rule		*rule, *tail;
1023 
1024 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1025 		ruleset = pf_find_ruleset(pr->anchor);
1026 		if (ruleset == NULL) {
1027 			error = EINVAL;
1028 			break;
1029 		}
1030 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1031 			error = EINVAL;
1032 			break;
1033 		}
1034 		if (pr->ticket != ruleset->rules.inactive.ticket) {
1035 			error = EBUSY;
1036 			break;
1037 		}
1038 		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1039 		if (rule == NULL) {
1040 			error = ENOMEM;
1041 			break;
1042 		}
1043 		if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
1044 			pool_put(&pf_rule_pl, rule);
1045 			break;
1046 		}
1047 		rule->cuid = p->p_cred->p_ruid;
1048 		rule->cpid = p->p_p->ps_pid;
1049 
1050 		switch (rule->af) {
1051 		case 0:
1052 			break;
1053 #ifdef INET
1054 		case AF_INET:
1055 			break;
1056 #endif /* INET */
1057 #ifdef INET6
1058 		case AF_INET6:
1059 			break;
1060 #endif /* INET6 */
1061 		default:
1062 			pool_put(&pf_rule_pl, rule);
1063 			error = EAFNOSUPPORT;
1064 			goto fail;
1065 		}
1066 		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
1067 		    pf_rulequeue);
1068 		if (tail)
1069 			rule->nr = tail->nr + 1;
1070 		else
1071 			rule->nr = 0;
1072 
1073 		if (rule->src.addr.type == PF_ADDR_NONE ||
1074 		    rule->dst.addr.type == PF_ADDR_NONE)
1075 			error = EINVAL;
1076 
1077 		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1078 			error = EINVAL;
1079 		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1080 			error = EINVAL;
1081 		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
1082 			error = EINVAL;
1083 		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
1084 			error = EINVAL;
1085 		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
1086 			error = EINVAL;
1087 		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1088 			error = EINVAL;
1089 		if (rule->rt && !rule->direction)
1090 			error = EINVAL;
1091 		if ((rule->set_prio[0] != PF_PRIO_NOTSET &&
1092 		    rule->set_prio[0] > IFQ_MAXPRIO) ||
1093 		    (rule->set_prio[1] != PF_PRIO_NOTSET &&
1094 		    rule->set_prio[1] > IFQ_MAXPRIO))
1095 			error = EINVAL;
1096 
1097 		if (error) {
1098 			pf_rm_rule(NULL, rule);
1099 			break;
1100 		}
1101 		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
1102 		    rule, entries);
1103 		ruleset->rules.inactive.rcount++;
1104 		break;
1105 	}
1106 
1107 	case DIOCGETRULES: {
1108 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1109 		struct pf_ruleset	*ruleset;
1110 		struct pf_rule		*tail;
1111 
1112 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1113 		ruleset = pf_find_ruleset(pr->anchor);
1114 		if (ruleset == NULL) {
1115 			error = EINVAL;
1116 			break;
1117 		}
1118 		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
1119 		if (tail)
1120 			pr->nr = tail->nr + 1;
1121 		else
1122 			pr->nr = 0;
1123 		pr->ticket = ruleset->rules.active.ticket;
1124 		break;
1125 	}
1126 
1127 	case DIOCGETRULE: {
1128 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1129 		struct pf_ruleset	*ruleset;
1130 		struct pf_rule		*rule;
1131 		int			 i;
1132 
1133 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1134 		ruleset = pf_find_ruleset(pr->anchor);
1135 		if (ruleset == NULL) {
1136 			error = EINVAL;
1137 			break;
1138 		}
1139 		if (pr->ticket != ruleset->rules.active.ticket) {
1140 			error = EBUSY;
1141 			break;
1142 		}
1143 		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
1144 		while ((rule != NULL) && (rule->nr != pr->nr))
1145 			rule = TAILQ_NEXT(rule, entries);
1146 		if (rule == NULL) {
1147 			error = EBUSY;
1148 			break;
1149 		}
1150 		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1151 		if (pf_anchor_copyout(ruleset, rule, pr)) {
1152 			error = EBUSY;
1153 			break;
1154 		}
1155 		pf_addr_copyout(&pr->rule.src.addr);
1156 		pf_addr_copyout(&pr->rule.dst.addr);
1157 		pf_addr_copyout(&pr->rule.rdr.addr);
1158 		pf_addr_copyout(&pr->rule.nat.addr);
1159 		pf_addr_copyout(&pr->rule.route.addr);
1160 		for (i = 0; i < PF_SKIP_COUNT; ++i)
1161 			if (rule->skip[i].ptr == NULL)
1162 				pr->rule.skip[i].nr = -1;
1163 			else
1164 				pr->rule.skip[i].nr =
1165 				    rule->skip[i].ptr->nr;
1166 
1167 		if (pr->action == PF_GET_CLR_CNTR) {
1168 			rule->evaluations = 0;
1169 			rule->packets[0] = rule->packets[1] = 0;
1170 			rule->bytes[0] = rule->bytes[1] = 0;
1171 			rule->states_tot = 0;
1172 		}
1173 		break;
1174 	}
1175 
1176 	case DIOCCHANGERULE: {
1177 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1178 		struct pf_ruleset	*ruleset;
1179 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1180 		u_int32_t		 nr = 0;
1181 
1182 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1183 		    pcr->action > PF_CHANGE_GET_TICKET) {
1184 			error = EINVAL;
1185 			break;
1186 		}
1187 		ruleset = pf_find_ruleset(pcr->anchor);
1188 		if (ruleset == NULL) {
1189 			error = EINVAL;
1190 			break;
1191 		}
1192 
1193 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1194 			pcr->ticket = ++ruleset->rules.active.ticket;
1195 			break;
1196 		} else {
1197 			if (pcr->ticket !=
1198 			    ruleset->rules.active.ticket) {
1199 				error = EINVAL;
1200 				break;
1201 			}
1202 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1203 				error = EINVAL;
1204 				break;
1205 			}
1206 		}
1207 
1208 		if (pcr->action != PF_CHANGE_REMOVE) {
1209 			newrule = pool_get(&pf_rule_pl,
1210 			    PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1211 			if (newrule == NULL) {
1212 				error = ENOMEM;
1213 				break;
1214 			}
1215 			pf_rule_copyin(&pcr->rule, newrule, ruleset);
1216 			newrule->cuid = p->p_cred->p_ruid;
1217 			newrule->cpid = p->p_p->ps_pid;
1218 
1219 			switch (newrule->af) {
1220 			case 0:
1221 				break;
1222 #ifdef INET
1223 			case AF_INET:
1224 				break;
1225 #endif /* INET */
1226 #ifdef INET6
1227 			case AF_INET6:
1228 				break;
1229 #endif /* INET6 */
1230 			default:
1231 				pool_put(&pf_rule_pl, newrule);
1232 				error = EAFNOSUPPORT;
1233 				goto fail;
1234 			}
1235 
1236 			if (newrule->rt && !newrule->direction)
1237 				error = EINVAL;
1238 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1239 				error = EINVAL;
1240 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1241 				error = EINVAL;
1242 			if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
1243 				error = EINVAL;
1244 			if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
1245 				error = EINVAL;
1246 			if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
1247 				error = EINVAL;
1248 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1249 				error = EINVAL;
1250 
1251 			if (error) {
1252 				pf_rm_rule(NULL, newrule);
1253 				break;
1254 			}
1255 		}
1256 
1257 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1258 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1259 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1260 			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1261 			    pf_rulequeue);
1262 		else {
1263 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1264 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1265 				oldrule = TAILQ_NEXT(oldrule, entries);
1266 			if (oldrule == NULL) {
1267 				if (newrule != NULL)
1268 					pf_rm_rule(NULL, newrule);
1269 				error = EINVAL;
1270 				break;
1271 			}
1272 		}
1273 
1274 		if (pcr->action == PF_CHANGE_REMOVE) {
1275 			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1276 			ruleset->rules.active.rcount--;
1277 		} else {
1278 			if (oldrule == NULL)
1279 				TAILQ_INSERT_TAIL(
1280 				    ruleset->rules.active.ptr,
1281 				    newrule, entries);
1282 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1283 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1284 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1285 			else
1286 				TAILQ_INSERT_AFTER(
1287 				    ruleset->rules.active.ptr,
1288 				    oldrule, newrule, entries);
1289 			ruleset->rules.active.rcount++;
1290 		}
1291 
1292 		nr = 0;
1293 		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1294 			oldrule->nr = nr++;
1295 
1296 		ruleset->rules.active.ticket++;
1297 
1298 		pf_calc_skip_steps(ruleset->rules.active.ptr);
1299 		pf_remove_if_empty_ruleset(ruleset);
1300 
1301 		break;
1302 	}
1303 
1304 	case DIOCCLRSTATES: {
1305 		struct pf_state		*s, *nexts;
1306 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1307 		u_int			 killed = 0;
1308 
1309 		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
1310 			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1311 
1312 			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1313 			    s->kif->pfik_name)) {
1314 #if NPFSYNC > 0
1315 				/* don't send out individual delete messages */
1316 				SET(s->state_flags, PFSTATE_NOSYNC);
1317 #endif
1318 				pf_unlink_state(s);
1319 				killed++;
1320 			}
1321 		}
1322 		psk->psk_killed = killed;
1323 #if NPFSYNC > 0
1324 		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1325 #endif
1326 		break;
1327 	}
1328 
1329 	case DIOCKILLSTATES: {
1330 		struct pf_state		*s, *nexts;
1331 		struct pf_state_key	*sk;
1332 		struct pf_addr		*srcaddr, *dstaddr;
1333 		u_int16_t		 srcport, dstport;
1334 		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1335 		u_int			 killed = 0;
1336 
1337 		if (psk->psk_pfcmp.id) {
1338 			if (psk->psk_pfcmp.creatorid == 0)
1339 				psk->psk_pfcmp.creatorid = pf_status.hostid;
1340 			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1341 				pf_unlink_state(s);
1342 				psk->psk_killed = 1;
1343 			}
1344 			break;
1345 		}
1346 
1347 		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
1348 		    s = nexts) {
1349 			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1350 
1351 			if (s->direction == PF_OUT) {
1352 				sk = s->key[PF_SK_STACK];
1353 				srcaddr = &sk->addr[1];
1354 				dstaddr = &sk->addr[0];
1355 				srcport = sk->port[1];
1356 				dstport = sk->port[0];
1357 			} else {
1358 				sk = s->key[PF_SK_WIRE];
1359 				srcaddr = &sk->addr[0];
1360 				dstaddr = &sk->addr[1];
1361 				srcport = sk->port[0];
1362 				dstport = sk->port[1];
1363 			}
1364 			if ((!psk->psk_af || sk->af == psk->psk_af)
1365 			    && (!psk->psk_proto || psk->psk_proto ==
1366 			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
1367 			    PF_MATCHA(psk->psk_src.neg,
1368 			    &psk->psk_src.addr.v.a.addr,
1369 			    &psk->psk_src.addr.v.a.mask,
1370 			    srcaddr, sk->af) &&
1371 			    PF_MATCHA(psk->psk_dst.neg,
1372 			    &psk->psk_dst.addr.v.a.addr,
1373 			    &psk->psk_dst.addr.v.a.mask,
1374 			    dstaddr, sk->af) &&
1375 			    (psk->psk_src.port_op == 0 ||
1376 			    pf_match_port(psk->psk_src.port_op,
1377 			    psk->psk_src.port[0], psk->psk_src.port[1],
1378 			    srcport)) &&
1379 			    (psk->psk_dst.port_op == 0 ||
1380 			    pf_match_port(psk->psk_dst.port_op,
1381 			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1382 			    dstport)) &&
1383 			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
1384 			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1385 			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1386 			    s->kif->pfik_name))) {
1387 				pf_unlink_state(s);
1388 				killed++;
1389 			}
1390 		}
1391 		psk->psk_killed = killed;
1392 		break;
1393 	}
1394 
1395 #if NPFSYNC > 0
1396 	case DIOCADDSTATE: {
1397 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1398 		struct pfsync_state	*sp = &ps->state;
1399 
1400 		if (sp->timeout >= PFTM_MAX) {
1401 			error = EINVAL;
1402 			break;
1403 		}
1404 		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
1405 		break;
1406 	}
1407 #endif
1408 
1409 	case DIOCGETSTATE: {
1410 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1411 		struct pf_state		*s;
1412 		struct pf_state_cmp	 id_key;
1413 
1414 		bzero(&id_key, sizeof(id_key));
1415 		id_key.id = ps->state.id;
1416 		id_key.creatorid = ps->state.creatorid;
1417 
1418 		s = pf_find_state_byid(&id_key);
1419 		if (s == NULL) {
1420 			error = ENOENT;
1421 			break;
1422 		}
1423 
1424 		pf_state_export(&ps->state, s);
1425 		break;
1426 	}
1427 
1428 	case DIOCGETSTATES: {
1429 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
1430 		struct pf_state		*state;
1431 		struct pfsync_state	*p, *pstore;
1432 		u_int32_t		 nr = 0;
1433 
1434 		if (ps->ps_len == 0) {
1435 			nr = pf_status.states;
1436 			ps->ps_len = sizeof(struct pfsync_state) * nr;
1437 			break;
1438 		}
1439 
1440 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1441 
1442 		p = ps->ps_states;
1443 
1444 		state = TAILQ_FIRST(&state_list);
1445 		while (state) {
1446 			if (state->timeout != PFTM_UNLINKED) {
1447 				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1448 					break;
1449 				pf_state_export(pstore, state);
1450 				error = copyout(pstore, p, sizeof(*p));
1451 				if (error) {
1452 					free(pstore, M_TEMP);
1453 					goto fail;
1454 				}
1455 				p++;
1456 				nr++;
1457 			}
1458 			state = TAILQ_NEXT(state, entry_list);
1459 		}
1460 
1461 		ps->ps_len = sizeof(struct pfsync_state) * nr;
1462 
1463 		free(pstore, M_TEMP);
1464 		break;
1465 	}
1466 
1467 	case DIOCGETSTATUS: {
1468 		struct pf_status *s = (struct pf_status *)addr;
1469 		bcopy(&pf_status, s, sizeof(struct pf_status));
1470 		pfi_update_status(s->ifname, s);
1471 		break;
1472 	}
1473 
1474 	case DIOCSETSTATUSIF: {
1475 		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;
1476 
1477 		if (pi->pfiio_name[0] == 0) {
1478 			bzero(pf_status.ifname, IFNAMSIZ);
1479 			break;
1480 		}
1481 		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
1482 		pf_trans_set.mask |= PF_TSET_STATUSIF;
1483 		break;
1484 	}
1485 
1486 	case DIOCCLRSTATUS: {
1487 		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;
1488 
1489 		/* if ifname is specified, clear counters there only */
1490 		if (pi->pfiio_name[0]) {
1491 			pfi_update_status(pi->pfiio_name, NULL);
1492 			break;
1493 		}
1494 
1495 		bzero(pf_status.counters, sizeof(pf_status.counters));
1496 		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1497 		bzero(pf_status.scounters, sizeof(pf_status.scounters));
1498 		pf_status.since = time_second;
1499 
1500 		break;
1501 	}
1502 
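	/*
	 * DIOCNATLOOK resolves an existing state from the addresses and
	 * ports the caller sees and returns the translated peer, which is
	 * how transparent proxies recover the original destination.  A
	 * rough userland sketch (hypothetical proxy; pffd is an open
	 * /dev/pf descriptor, addresses and ports in network byte order):
	 *
	 *	struct pfioc_natlook nl;
	 *
	 *	memset(&nl, 0, sizeof(nl));
	 *	nl.af = AF_INET;
	 *	nl.proto = IPPROTO_TCP;
	 *	nl.direction = PF_OUT;
	 *	nl.saddr = client_addr;	nl.sport = client_port;
	 *	nl.daddr = proxy_addr;	nl.dport = proxy_port;
	 *	if (ioctl(pffd, DIOCNATLOOK, &nl) == 0)
	 *		connect_to(&nl.rdaddr, nl.rdport);
	 *
	 * E2BIG means more than one state matched, ENOENT means none did.
	 */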
1503 	case DIOCNATLOOK: {
1504 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
1505 		struct pf_state_key	*sk;
1506 		struct pf_state		*state;
1507 		struct pf_state_key_cmp	 key;
1508 		int			 m = 0, direction = pnl->direction;
1509 		int			 sidx, didx;
1510 
1511 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
1512 		sidx = (direction == PF_IN) ? 1 : 0;
1513 		didx = (direction == PF_IN) ? 0 : 1;
1514 
1515 		if (!pnl->proto ||
1516 		    PF_AZERO(&pnl->saddr, pnl->af) ||
1517 		    PF_AZERO(&pnl->daddr, pnl->af) ||
1518 		    ((pnl->proto == IPPROTO_TCP ||
1519 		    pnl->proto == IPPROTO_UDP) &&
1520 		    (!pnl->dport || !pnl->sport)) ||
1521 		    pnl->rdomain > RT_TABLEID_MAX)
1522 			error = EINVAL;
1523 		else {
1524 			key.af = pnl->af;
1525 			key.proto = pnl->proto;
1526 			key.rdomain = pnl->rdomain;
1527 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1528 			key.port[sidx] = pnl->sport;
1529 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1530 			key.port[didx] = pnl->dport;
1531 
1532 			state = pf_find_state_all(&key, direction, &m);
1533 
1534 			if (m > 1)
1535 				error = E2BIG;	/* more than one state */
1536 			else if (state != NULL) {
1537 				sk = state->key[sidx];
1538 				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1539 				pnl->rsport = sk->port[sidx];
1540 				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1541 				pnl->rdport = sk->port[didx];
1542 				pnl->rrdomain = sk->rdomain;
1543 			} else
1544 				error = ENOENT;
1545 		}
1546 		break;
1547 	}
1548 
1549 	case DIOCSETTIMEOUT: {
1550 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1551 
1552 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1553 		    pt->seconds < 0) {
1554 			error = EINVAL;
1555 			goto fail;
1556 		}
1557 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1558 			pt->seconds = 1;
1559 		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
1560 		pt->seconds = pf_default_rule.timeout[pt->timeout];
1561 		break;
1562 	}
1563 
1564 	case DIOCGETTIMEOUT: {
1565 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1566 
1567 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1568 			error = EINVAL;
1569 			goto fail;
1570 		}
1571 		pt->seconds = pf_default_rule.timeout[pt->timeout];
1572 		break;
1573 	}
1574 
1575 	case DIOCGETLIMIT: {
1576 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1577 
1578 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1579 			error = EINVAL;
1580 			goto fail;
1581 		}
1582 		pl->limit = pf_pool_limits[pl->index].limit;
1583 		break;
1584 	}
1585 
1586 	case DIOCSETLIMIT: {
1587 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1588 
1589 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1590 		    pf_pool_limits[pl->index].pp == NULL) {
1591 			error = EINVAL;
1592 			goto fail;
1593 		}
1594 		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
1595 		    pl->limit) {
1596 			error = EBUSY;
1597 			goto fail;
1598 		}
1599 		pf_pool_limits[pl->index].limit_new = pl->limit;
1600 		pl->limit = pf_pool_limits[pl->index].limit;
1601 		break;
1602 	}
1603 
1604 	case DIOCSETDEBUG: {
1605 		u_int32_t	*level = (u_int32_t *)addr;
1606 
1607 		pf_trans_set.debug = *level;
1608 		pf_trans_set.mask |= PF_TSET_DEBUG;
1609 		break;
1610 	}
1611 
1612 	case DIOCCLRRULECTRS: {
1613 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1614 		struct pf_ruleset	*ruleset = &pf_main_ruleset;
1615 		struct pf_rule		*rule;
1616 
1617 		TAILQ_FOREACH(rule,
1618 		    ruleset->rules.active.ptr, entries) {
1619 			rule->evaluations = 0;
1620 			rule->packets[0] = rule->packets[1] = 0;
1621 			rule->bytes[0] = rule->bytes[1] = 0;
1622 		}
1623 		break;
1624 	}
1625 
1626 #ifdef ALTQ
1627 	case DIOCSTARTALTQ: {
1628 		struct pf_altq		*altq;
1629 
1630 		/* enable all altq interfaces on active list */
1631 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1632 			if (altq->qname[0] == 0) {
1633 				error = pf_enable_altq(altq);
1634 				if (error != 0)
1635 					break;
1636 			}
1637 		}
1638 		if (error == 0)
1639 			pf_altq_running = 1;
1640 		DPFPRINTF(LOG_NOTICE, "altq: started");
1641 		break;
1642 	}
1643 
1644 	case DIOCSTOPALTQ: {
1645 		struct pf_altq		*altq;
1646 
1647 		/* disable all altq interfaces on active list */
1648 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1649 			if (altq->qname[0] == 0) {
1650 				error = pf_disable_altq(altq);
1651 				if (error != 0)
1652 					break;
1653 			}
1654 		}
1655 		if (error == 0)
1656 			pf_altq_running = 0;
1657 		DPFPRINTF(LOG_NOTICE, "altq: stopped");
1658 		break;
1659 	}
1660 
1661 	case DIOCADDALTQ: {
1662 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1663 		struct pf_altq		*altq, *a;
1664 
1665 		if (pa->ticket != ticket_altqs_inactive) {
1666 			error = EBUSY;
1667 			break;
1668 		}
1669 		altq = pool_get(&pf_altq_pl, PR_WAITOK|PR_LIMITFAIL);
1670 		if (altq == NULL) {
1671 			error = ENOMEM;
1672 			break;
1673 		}
1674 		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1675 		altq->altq_disc = NULL;
1676 
1677 		/*
1678 		 * if this is for a queue, find the discipline and
1679 		 * copy the necessary fields
1680 		 */
1681 		if (altq->qname[0] != 0) {
1682 			if ((altq->qid = pf_oqname2qid(altq->qname)) == 0) {
1683 				error = EBUSY;
1684 				pool_put(&pf_altq_pl, altq);
1685 				break;
1686 			}
1687 			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1688 				if (strncmp(a->ifname, altq->ifname,
1689 				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
1690 					altq->altq_disc = a->altq_disc;
1691 					break;
1692 				}
1693 			}
1694 		}
1695 
1696 		error = altq_add(altq);
1697 		if (error) {
1698 			pool_put(&pf_altq_pl, altq);
1699 			break;
1700 		}
1701 
1702 		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
1703 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1704 		break;
1705 	}
1706 
1707 	case DIOCGETALTQS: {
1708 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1709 		struct pf_altq		*altq;
1710 
1711 		pa->nr = 0;
1712 		TAILQ_FOREACH(altq, pf_altqs_active, entries)
1713 			pa->nr++;
1714 		pa->ticket = ticket_altqs_active;
1715 		break;
1716 	}
1717 
1718 	case DIOCGETALTQ: {
1719 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1720 		struct pf_altq		*altq;
1721 		u_int32_t		 nr;
1722 
1723 		if (pa->ticket != ticket_altqs_active) {
1724 			error = EBUSY;
1725 			break;
1726 		}
1727 		nr = 0;
1728 		altq = TAILQ_FIRST(pf_altqs_active);
1729 		while ((altq != NULL) && (nr < pa->nr)) {
1730 			altq = TAILQ_NEXT(altq, entries);
1731 			nr++;
1732 		}
1733 		if (altq == NULL) {
1734 			error = EBUSY;
1735 			break;
1736 		}
1737 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1738 		break;
1739 	}
1740 
1741 	case DIOCCHANGEALTQ:
1742 		/* CHANGEALTQ not supported yet! */
1743 		error = ENODEV;
1744 		break;
1745 
1746 	case DIOCGETQSTATS: {
1747 		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
1748 		struct pf_altq		*altq;
1749 		u_int32_t		 nr;
1750 		int			 nbytes;
1751 
1752 		if (pq->ticket != ticket_altqs_active) {
1753 			error = EBUSY;
1754 			break;
1755 		}
1756 		nbytes = pq->nbytes;
1757 		nr = 0;
1758 		altq = TAILQ_FIRST(pf_altqs_active);
1759 		while ((altq != NULL) && (nr < pq->nr)) {
1760 			altq = TAILQ_NEXT(altq, entries);
1761 			nr++;
1762 		}
1763 		if (altq == NULL) {
1764 			error = EBUSY;
1765 			break;
1766 		}
1767 		error = altq_getqstats(altq, pq->buf, &nbytes);
1768 		if (error == 0) {
1769 			pq->scheduler = altq->scheduler;
1770 			pq->nbytes = nbytes;
1771 		}
1772 		break;
1773 	}
1774 #endif /* ALTQ */
1775 
1776 	case DIOCGETRULESETS: {
1777 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
1778 		struct pf_ruleset	*ruleset;
1779 		struct pf_anchor	*anchor;
1780 
1781 		pr->path[sizeof(pr->path) - 1] = 0;
1782 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
1783 			error = EINVAL;
1784 			break;
1785 		}
1786 		pr->nr = 0;
1787 		if (ruleset->anchor == NULL) {
1788 			/* XXX kludge for pf_main_ruleset */
1789 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
1790 				if (anchor->parent == NULL)
1791 					pr->nr++;
1792 		} else {
1793 			RB_FOREACH(anchor, pf_anchor_node,
1794 			    &ruleset->anchor->children)
1795 				pr->nr++;
1796 		}
1797 		break;
1798 	}
1799 
1800 	case DIOCGETRULESET: {
1801 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
1802 		struct pf_ruleset	*ruleset;
1803 		struct pf_anchor	*anchor;
1804 		u_int32_t		 nr = 0;
1805 
1806 		pr->path[sizeof(pr->path) - 1] = 0;
1807 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
1808 			error = EINVAL;
1809 			break;
1810 		}
1811 		pr->name[0] = 0;
1812 		if (ruleset->anchor == NULL) {
1813 			/* XXX kludge for pf_main_ruleset */
1814 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
1815 				if (anchor->parent == NULL && nr++ == pr->nr) {
1816 					strlcpy(pr->name, anchor->name,
1817 					    sizeof(pr->name));
1818 					break;
1819 				}
1820 		} else {
1821 			RB_FOREACH(anchor, pf_anchor_node,
1822 			    &ruleset->anchor->children)
1823 				if (nr++ == pr->nr) {
1824 					strlcpy(pr->name, anchor->name,
1825 					    sizeof(pr->name));
1826 					break;
1827 				}
1828 		}
1829 		if (!pr->name[0])
1830 			error = EBUSY;
1831 		break;
1832 	}
1833 
1834 	case DIOCRCLRTABLES: {
1835 		struct pfioc_table *io = (struct pfioc_table *)addr;
1836 
1837 		if (io->pfrio_esize != 0) {
1838 			error = ENODEV;
1839 			break;
1840 		}
1841 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
1842 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
1843 		break;
1844 	}
1845 
1846 	case DIOCRADDTABLES: {
1847 		struct pfioc_table *io = (struct pfioc_table *)addr;
1848 
1849 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1850 			error = ENODEV;
1851 			break;
1852 		}
1853 		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
1854 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1855 		break;
1856 	}
1857 
1858 	case DIOCRDELTABLES: {
1859 		struct pfioc_table *io = (struct pfioc_table *)addr;
1860 
1861 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1862 			error = ENODEV;
1863 			break;
1864 		}
1865 		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
1866 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1867 		break;
1868 	}
1869 
1870 	case DIOCRGETTABLES: {
1871 		struct pfioc_table *io = (struct pfioc_table *)addr;
1872 
1873 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1874 			error = ENODEV;
1875 			break;
1876 		}
1877 		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
1878 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1879 		break;
1880 	}
1881 
1882 	case DIOCRGETTSTATS: {
1883 		struct pfioc_table *io = (struct pfioc_table *)addr;
1884 
1885 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
1886 			error = ENODEV;
1887 			break;
1888 		}
1889 		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
1890 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1891 		break;
1892 	}
1893 
1894 	case DIOCRCLRTSTATS: {
1895 		struct pfioc_table *io = (struct pfioc_table *)addr;
1896 
1897 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1898 			error = ENODEV;
1899 			break;
1900 		}
1901 		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
1902 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1903 		break;
1904 	}
1905 
1906 	case DIOCRSETTFLAGS: {
1907 		struct pfioc_table *io = (struct pfioc_table *)addr;
1908 
1909 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1910 			error = ENODEV;
1911 			break;
1912 		}
1913 		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
1914 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
1915 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1916 		break;
1917 	}
1918 
1919 	case DIOCRCLRADDRS: {
1920 		struct pfioc_table *io = (struct pfioc_table *)addr;
1921 
1922 		if (io->pfrio_esize != 0) {
1923 			error = ENODEV;
1924 			break;
1925 		}
1926 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
1927 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
1928 		break;
1929 	}
1930 
1931 	case DIOCRADDADDRS: {
1932 		struct pfioc_table *io = (struct pfioc_table *)addr;
1933 
1934 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1935 			error = ENODEV;
1936 			break;
1937 		}
1938 		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
1939 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
1940 		    PFR_FLAG_USERIOCTL);
1941 		break;
1942 	}
1943 
1944 	case DIOCRDELADDRS: {
1945 		struct pfioc_table *io = (struct pfioc_table *)addr;
1946 
1947 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1948 			error = ENODEV;
1949 			break;
1950 		}
1951 		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
1952 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
1953 		    PFR_FLAG_USERIOCTL);
1954 		break;
1955 	}
1956 
1957 	case DIOCRSETADDRS: {
1958 		struct pfioc_table *io = (struct pfioc_table *)addr;
1959 
1960 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1961 			error = ENODEV;
1962 			break;
1963 		}
1964 		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
1965 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
1966 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
1967 		    PFR_FLAG_USERIOCTL, 0);
1968 		break;
1969 	}
1970 
1971 	case DIOCRGETADDRS: {
1972 		struct pfioc_table *io = (struct pfioc_table *)addr;
1973 
1974 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1975 			error = ENODEV;
1976 			break;
1977 		}
1978 		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
1979 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1980 		break;
1981 	}
1982 
1983 	case DIOCRGETASTATS: {
1984 		struct pfioc_table *io = (struct pfioc_table *)addr;
1985 
1986 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
1987 			error = ENODEV;
1988 			break;
1989 		}
1990 		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
1991 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1992 		break;
1993 	}
1994 
1995 	case DIOCRCLRASTATS: {
1996 		struct pfioc_table *io = (struct pfioc_table *)addr;
1997 
1998 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1999 			error = ENODEV;
2000 			break;
2001 		}
2002 		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2003 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2004 		    PFR_FLAG_USERIOCTL);
2005 		break;
2006 	}
2007 
2008 	case DIOCRTSTADDRS: {
2009 		struct pfioc_table *io = (struct pfioc_table *)addr;
2010 
2011 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2012 			error = ENODEV;
2013 			break;
2014 		}
2015 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2016 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2017 		    PFR_FLAG_USERIOCTL);
2018 		break;
2019 	}
2020 
2021 	case DIOCRINADEFINE: {
2022 		struct pfioc_table *io = (struct pfioc_table *)addr;
2023 
2024 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2025 			error = ENODEV;
2026 			break;
2027 		}
2028 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2029 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2030 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2031 		break;
2032 	}
2033 
2034 	case DIOCOSFPADD: {
2035 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2036 		error = pf_osfp_add(io);
2037 		break;
2038 	}
2039 
2040 	case DIOCOSFPGET: {
2041 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2042 		error = pf_osfp_get(io);
2043 		break;
2044 	}
2045 
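	/*
	 * DIOCXBEGIN opens a transaction: for each element of the
	 * userland-supplied array an inactive altq list, table set or
	 * ruleset is created and its ticket copied back, to be presented
	 * again on commit or rollback.  A typical userland sequence
	 * (pfctl(8), roughly, with dev an open /dev/pf descriptor):
	 *
	 *	ioctl(dev, DIOCXBEGIN, &io);
	 *	... DIOCADDRULE / DIOCRINADEFINE using the returned tickets ...
	 *	ioctl(dev, DIOCXCOMMIT, &io);
	 */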
2046 	case DIOCXBEGIN: {
2047 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2048 		struct pfioc_trans_e	*ioe;
2049 		struct pfr_table	*table;
2050 		int			 i;
2051 
2052 		if (io->esize != sizeof(*ioe)) {
2053 			error = ENODEV;
2054 			goto fail;
2055 		}
2056 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2057 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2058 		pf_default_rule_new = pf_default_rule;
2059 		bzero(&pf_trans_set, sizeof(pf_trans_set));
2060 		for (i = 0; i < io->size; i++) {
2061 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2062 				free(table, M_TEMP);
2063 				free(ioe, M_TEMP);
2064 				error = EFAULT;
2065 				goto fail;
2066 			}
2067 			switch (ioe->type) {
2068 #ifdef ALTQ
2069 			case PF_TRANS_ALTQ:
2070 				if (ioe->anchor[0]) {
2071 					free(table, M_TEMP);
2072 					free(ioe, M_TEMP);
2073 					error = EINVAL;
2074 					goto fail;
2075 				}
2076 				if ((error = pf_begin_altq(&ioe->ticket))) {
2077 					free(table, M_TEMP);
2078 					free(ioe, M_TEMP);
2079 					goto fail;
2080 				}
2081 				break;
2082 #endif /* ALTQ */
2083 			case PF_TRANS_TABLE:
2084 				bzero(table, sizeof(*table));
2085 				strlcpy(table->pfrt_anchor, ioe->anchor,
2086 				    sizeof(table->pfrt_anchor));
2087 				if ((error = pfr_ina_begin(table,
2088 				    &ioe->ticket, NULL, 0))) {
2089 					free(table, M_TEMP);
2090 					free(ioe, M_TEMP);
2091 					goto fail;
2092 				}
2093 				break;
2094 			default:
2095 				if ((error = pf_begin_rules(&ioe->ticket,
2096 				    ioe->anchor))) {
2097 					free(table, M_TEMP);
2098 					free(ioe, M_TEMP);
2099 					goto fail;
2100 				}
2101 				break;
2102 			}
2103 			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2104 				free(table, M_TEMP);
2105 				free(ioe, M_TEMP);
2106 				error = EFAULT;
2107 				goto fail;
2108 			}
2109 		}
2110 		free(table, M_TEMP);
2111 		free(ioe, M_TEMP);
2112 		break;
2113 	}
2114 
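	/*
	 * DIOCXROLLBACK discards an open transaction: every inactive altq
	 * list, table set or ruleset named by a ticket in the array is
	 * thrown away without touching the active configuration.
	 */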
2115 	case DIOCXROLLBACK: {
2116 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2117 		struct pfioc_trans_e	*ioe;
2118 		struct pfr_table	*table;
2119 		int			 i;
2120 
2121 		if (io->esize != sizeof(*ioe)) {
2122 			error = ENODEV;
2123 			goto fail;
2124 		}
2125 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2126 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2127 		for (i = 0; i < io->size; i++) {
2128 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2129 				free(table, M_TEMP);
2130 				free(ioe, M_TEMP);
2131 				error = EFAULT;
2132 				goto fail;
2133 			}
2134 			switch (ioe->type) {
2135 #ifdef ALTQ
2136 			case PF_TRANS_ALTQ:
2137 				if (ioe->anchor[0]) {
2138 					free(table, M_TEMP);
2139 					free(ioe, M_TEMP);
2140 					error = EINVAL;
2141 					goto fail;
2142 				}
2143 				if ((error = pf_rollback_altq(ioe->ticket))) {
2144 					free(table, M_TEMP);
2145 					free(ioe, M_TEMP);
2146 					goto fail; /* really bad */
2147 				}
2148 				break;
2149 #endif /* ALTQ */
2150 			case PF_TRANS_TABLE:
2151 				bzero(table, sizeof(*table));
2152 				strlcpy(table->pfrt_anchor, ioe->anchor,
2153 				    sizeof(table->pfrt_anchor));
2154 				if ((error = pfr_ina_rollback(table,
2155 				    ioe->ticket, NULL, 0))) {
2156 					free(table, M_TEMP);
2157 					free(ioe, M_TEMP);
2158 					goto fail; /* really bad */
2159 				}
2160 				break;
2161 			default:
2162 				if ((error = pf_rollback_rules(ioe->ticket,
2163 				    ioe->anchor))) {
2164 					free(table, M_TEMP);
2165 					free(ioe, M_TEMP);
2166 					goto fail; /* really bad */
2167 				}
2168 				break;
2169 			}
2170 		}
2171 		free(table, M_TEMP);
2172 		free(ioe, M_TEMP);
2173 		break;
2174 	}
2175 
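	/*
	 * DIOCXCOMMIT makes a transaction live.  All tickets and the
	 * staged pool limits are validated up front so that the swap of
	 * the inactive rulesets, tables and altq lists into place should
	 * not fail; an error past that point leaves pf in an inconsistent
	 * state (the "really bad" cases below).
	 */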
2176 	case DIOCXCOMMIT: {
2177 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2178 		struct pfioc_trans_e	*ioe;
2179 		struct pfr_table	*table;
2180 		struct pf_ruleset	*rs;
2181 		int			 i;
2182 
2183 		if (io->esize != sizeof(*ioe)) {
2184 			error = ENODEV;
2185 			goto fail;
2186 		}
2187 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2188 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2189 		/* first make sure everything will succeed */
2190 		for (i = 0; i < io->size; i++) {
2191 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2192 				free(table, M_TEMP);
2193 				free(ioe, M_TEMP);
2194 				error = EFAULT;
2195 				goto fail;
2196 			}
2197 			switch (ioe->type) {
2198 #ifdef ALTQ
2199 			case PF_TRANS_ALTQ:
2200 				if (ioe->anchor[0]) {
2201 					free(table, M_TEMP);
2202 					free(ioe, M_TEMP);
2203 					error = EINVAL;
2204 					goto fail;
2205 				}
2206 				if (!altqs_inactive_open || ioe->ticket !=
2207 				    ticket_altqs_inactive) {
2208 					free(table, M_TEMP);
2209 					free(ioe, M_TEMP);
2210 					error = EBUSY;
2211 					goto fail;
2212 				}
2213 				break;
2214 #endif /* ALTQ */
2215 			case PF_TRANS_TABLE:
2216 				rs = pf_find_ruleset(ioe->anchor);
2217 				if (rs == NULL || !rs->topen || ioe->ticket !=
2218 				     rs->tticket) {
2219 					free(table, M_TEMP);
2220 					free(ioe, M_TEMP);
2221 					error = EBUSY;
2222 					goto fail;
2223 				}
2224 				break;
2225 			default:
2226 				rs = pf_find_ruleset(ioe->anchor);
2227 				if (rs == NULL ||
2228 				    !rs->rules.inactive.open ||
2229 				    rs->rules.inactive.ticket !=
2230 				    ioe->ticket) {
2231 					free(table, M_TEMP);
2232 					free(ioe, M_TEMP);
2233 					error = EBUSY;
2234 					goto fail;
2235 				}
2236 				break;
2237 			}
2238 		}
2239 		/*
2240 		 * Checked already in DIOCSETLIMIT, but check again as the
2241 		 * situation might have changed.
2242 		 */
2243 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2244 			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2245 			    pf_pool_limits[i].limit_new) {
2246 				free(table, M_TEMP);
2247 				free(ioe, M_TEMP);
2248 				error = EBUSY;
2249 				goto fail;
2250 			}
2251 		}
2252 		/* now do the commit - no errors should happen here */
2253 		for (i = 0; i < io->size; i++) {
2254 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2255 				free(table, M_TEMP);
2256 				free(ioe, M_TEMP);
2257 				error = EFAULT;
2258 				goto fail;
2259 			}
2260 			switch (ioe->type) {
2261 #ifdef ALTQ
2262 			case PF_TRANS_ALTQ:
2263 				if ((error = pf_commit_altq(ioe->ticket))) {
2264 					free(table, M_TEMP);
2265 					free(ioe, M_TEMP);
2266 					goto fail; /* really bad */
2267 				}
2268 				break;
2269 #endif /* ALTQ */
2270 			case PF_TRANS_TABLE:
2271 				bzero(table, sizeof(*table));
2272 				strlcpy(table->pfrt_anchor, ioe->anchor,
2273 				    sizeof(table->pfrt_anchor));
2274 				if ((error = pfr_ina_commit(table, ioe->ticket,
2275 				    NULL, NULL, 0))) {
2276 					free(table, M_TEMP);
2277 					free(ioe, M_TEMP);
2278 					goto fail; /* really bad */
2279 				}
2280 				break;
2281 			default:
2282 				if ((error = pf_commit_rules(ioe->ticket,
2283 				    ioe->anchor))) {
2284 					free(table, M_TEMP);
2285 					free(ioe, M_TEMP);
2286 					goto fail; /* really bad */
2287 				}
2288 				break;
2289 			}
2290 		}
2291 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2292 			if (pf_pool_limits[i].limit_new !=
2293 			    pf_pool_limits[i].limit &&
2294 			    pool_sethardlimit(pf_pool_limits[i].pp,
2295 			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2296 				free(table, M_TEMP);
2297 				free(ioe, M_TEMP);
2298 				error = EBUSY;
2299 				goto fail; /* really bad */
2300 			}
2301 			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2302 		}
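		/*
		 * Install the new default-rule timeouts; if the purge
		 * interval was shortened, wake the purge thread so it
		 * picks up the new value right away.
		 */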
2303 		for (i = 0; i < PFTM_MAX; i++) {
2304 			int old = pf_default_rule.timeout[i];
2305 
2306 			pf_default_rule.timeout[i] =
2307 			    pf_default_rule_new.timeout[i];
2308 			if (i == PFTM_INTERVAL &&
2309 			    pf_default_rule.timeout[i] < old)
2310 				wakeup(pf_purge_thread);
2311 		}
2312 		pfi_xcommit();
2313 		pf_trans_set_commit();
2314 		free(table, M_TEMP);
2315 		free(ioe, M_TEMP);
2316 		break;
2317 	}
2318 
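	/*
	 * DIOCGETSRCNODES copies the source-tracking nodes to userland.
	 * With psn_len == 0 only the required buffer size is reported;
	 * otherwise as many nodes as fit are copied out, with creation
	 * and expiry times converted to seconds relative to now.
	 */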
2319 	case DIOCGETSRCNODES: {
2320 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2321 		struct pf_src_node	*n, *p, *pstore;
2322 		u_int32_t		 nr = 0;
2323 		int			 space = psn->psn_len;
2324 
2325 		if (space == 0) {
2326 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2327 				nr++;
2328 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2329 			break;
2330 		}
2331 
2332 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2333 
2334 		p = psn->psn_src_nodes;
2335 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2336 			int	secs = time_uptime, diff;
2337 
2338 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2339 				break;
2340 
2341 			bcopy(n, pstore, sizeof(*pstore));
2342 			if (n->rule.ptr != NULL)
2343 				pstore->rule.nr = n->rule.ptr->nr;
2344 			pstore->creation = time_uptime - pstore->creation;
2345 			if (pstore->expire > secs)
2346 				pstore->expire -= secs;
2347 			else
2348 				pstore->expire = 0;
2349 
2350 			/* adjust the connection rate estimate */
2351 			diff = secs - n->conn_rate.last;
2352 			if (diff >= n->conn_rate.seconds)
2353 				pstore->conn_rate.count = 0;
2354 			else
2355 				pstore->conn_rate.count -=
2356 				    n->conn_rate.count * diff /
2357 				    n->conn_rate.seconds;
2358 
2359 			error = copyout(pstore, p, sizeof(*p));
2360 			if (error) {
2361 				free(pstore, M_TEMP);
2362 				goto fail;
2363 			}
2364 			p++;
2365 			nr++;
2366 		}
2367 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2368 
2369 		free(pstore, M_TEMP);
2370 		break;
2371 	}
2372 
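	/*
	 * DIOCCLRSRCNODES detaches all states from their source nodes,
	 * marks every node as expired and triggers an immediate purge.
	 */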
2373 	case DIOCCLRSRCNODES: {
2374 		struct pf_src_node	*n;
2375 		struct pf_state		*state;
2376 
2377 		RB_FOREACH(state, pf_state_tree_id, &tree_id)
2378 			pf_src_tree_remove_state(state);
2379 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2380 			n->expire = 1;
2381 		pf_purge_expired_src_nodes(1);
2382 		break;
2383 	}
2384 
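	/*
	 * DIOCKILLSRCNODES expires only those source nodes whose source
	 * and destination addresses match the supplied address/mask
	 * pairs, unlinking any states still attached to them, and
	 * reports how many nodes were killed.
	 */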
2385 	case DIOCKILLSRCNODES: {
2386 		struct pf_src_node	*sn;
2387 		struct pf_state		*s;
2388 		struct pfioc_src_node_kill *psnk =
2389 		    (struct pfioc_src_node_kill *)addr;
2390 		u_int			killed = 0;
2391 
2392 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2393 			if (PF_MATCHA(psnk->psnk_src.neg,
2394 				&psnk->psnk_src.addr.v.a.addr,
2395 				&psnk->psnk_src.addr.v.a.mask,
2396 				&sn->addr, sn->af) &&
2397 			    PF_MATCHA(psnk->psnk_dst.neg,
2398 				&psnk->psnk_dst.addr.v.a.addr,
2399 				&psnk->psnk_dst.addr.v.a.mask,
2400 				&sn->raddr, sn->af)) {
2401 				/* Handle state to src_node linkage */
2402 				if (sn->states != 0)
2403 					RB_FOREACH(s, pf_state_tree_id,
2404 					   &tree_id)
2405 						pf_state_rm_src_node(s, sn);
2406 				sn->expire = 1;
2407 				killed++;
2408 			}
2409 		}
2410 
2411 		if (killed > 0)
2412 			pf_purge_expired_src_nodes(1);
2413 
2414 		psnk->psnk_killed = killed;
2415 		break;
2416 	}
2417 
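	/*
	 * DIOCSETHOSTID stages a new hostid (a random one if 0 is given)
	 * for use by pfsync; like the other option ioctls it only takes
	 * effect once the transaction is committed via
	 * pf_trans_set_commit().
	 */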
2418 	case DIOCSETHOSTID: {
2419 		u_int32_t	*hostid = (u_int32_t *)addr;
2420 
2421 		if (*hostid == 0)
2422 			pf_trans_set.hostid = arc4random();
2423 		else
2424 			pf_trans_set.hostid = *hostid;
2425 		pf_trans_set.mask |= PF_TSET_HOSTID;
2426 		break;
2427 	}
2428 
2429 	case DIOCOSFPFLUSH:
2430 		pf_osfp_flush();
2431 		break;
2432 
2433 	case DIOCIGETIFACES: {
2434 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2435 
2436 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2437 			error = ENODEV;
2438 			break;
2439 		}
2440 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2441 		    &io->pfiio_size);
2442 		break;
2443 	}
2444 
2445 	case DIOCSETIFFLAG: {
2446 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2447 
2448 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2449 		break;
2450 	}
2451 
2452 	case DIOCCLRIFFLAG: {
2453 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2454 
2455 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2456 		break;
2457 	}
2458 
2459 	case DIOCSETREASS: {
2460 		u_int32_t	*reass = (u_int32_t *)addr;
2461 
2462 		pf_trans_set.reass = *reass;
2463 		pf_trans_set.mask |= PF_TSET_REASS;
2464 		break;
2465 	}
2466 
2467 	default:
2468 		error = ENODEV;
2469 		break;
2470 	}
2471 fail:
2472 	splx(s);
2473 	if (flags & FWRITE)
2474 		rw_exit_write(&pf_consistency_lock);
2475 	else
2476 		rw_exit_read(&pf_consistency_lock);
2477 	return (error);
2478 }
2479 
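/*
 * Apply the option changes staged by the DIOCSET* ioctls (status
 * interface, debug level, hostid, reassembly) to the running pf_status.
 */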
2480 void
2481 pf_trans_set_commit(void)
2482 {
2483 	if (pf_trans_set.mask & PF_TSET_STATUSIF)
2484 		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2485 	if (pf_trans_set.mask & PF_TSET_DEBUG)
2486 		pf_status.debug = pf_trans_set.debug;
2487 	if (pf_trans_set.mask & PF_TSET_HOSTID)
2488 		pf_status.hostid = pf_trans_set.hostid;
2489 	if (pf_trans_set.mask & PF_TSET_REASS)
2490 		pf_status.reass = pf_trans_set.reass;
2491 }
2492 
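/*
 * Copy a pool from its userland representation; the kernel-only kif
 * pointer is cleared here and resolved later via pf_kif_setup().
 */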
2493 void
2494 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2495 {
2496 	bcopy(from, to, sizeof(*to));
2497 	to->kif = NULL;
2498 }
2499 
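/*
 * Copy a rule from its userland representation into a kernel rule,
 * resolving interface, table, queue, tag and routing-table references
 * along the way; returns an errno if any of those lookups fail.
 */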
2500 int
2501 pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
2502     struct pf_ruleset *ruleset)
2503 {
2504 	int i;
2505 
2506 	to->src = from->src;
2507 	to->dst = from->dst;
2508 
2509 	/* XXX union skip[] */
2510 
2511 	strlcpy(to->label, from->label, sizeof(to->label));
2512 	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
2513 	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
2514 	strlcpy(to->qname, from->qname, sizeof(to->qname));
2515 	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
2516 	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
2517 	strlcpy(to->match_tagname, from->match_tagname,
2518 	    sizeof(to->match_tagname));
2519 	strlcpy(to->overload_tblname, from->overload_tblname,
2520 	    sizeof(to->overload_tblname));
2521 
2522 	pf_pool_copyin(&from->nat, &to->nat);
2523 	pf_pool_copyin(&from->rdr, &to->rdr);
2524 	pf_pool_copyin(&from->route, &to->route);
2525 
2526 	if (pf_kif_setup(to->ifname, &to->kif))
2527 		return (EINVAL);
2528 	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
2529 		return (EINVAL);
2530 	if (to->overload_tblname[0]) {
2531 		if ((to->overload_tbl = pfr_attach_table(ruleset,
2532 		    to->overload_tblname, 0)) == NULL)
2533 			return (EINVAL);
2534 		else
2535 			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
2536 	}
2537 
2538 	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
2539 		return (EINVAL);
2540 	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
2541 		return (EINVAL);
2542 	if (pf_kif_setup(to->route.ifname, &to->route.kif))
2543 		return (EINVAL);
2544 
2545 	to->os_fingerprint = from->os_fingerprint;
2546 
2547 	to->rtableid = from->rtableid;
2548 	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
2549 		return (EBUSY);
2550 	to->onrdomain = from->onrdomain;
2551 	if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain))
2552 		return (EBUSY);
2553 	if (to->onrdomain >= 0)		/* make sure it is a real rdomain */
2554 		to->onrdomain = rtable_l2(to->onrdomain);
2555 
2556 	for (i = 0; i < PFTM_MAX; i++)
2557 		to->timeout[i] = from->timeout[i];
2558 	to->states_tot = from->states_tot;
2559 	to->max_states = from->max_states;
2560 	to->max_src_nodes = from->max_src_nodes;
2561 	to->max_src_states = from->max_src_states;
2562 	to->max_src_conn = from->max_src_conn;
2563 	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
2564 	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
2565 
2566 #ifdef ALTQ
2567 	/* set queue IDs */
2568 	if (to->qname[0] != 0) {
2569 		if ((to->qid = pf_oqname2qid(to->qname)) == 0)
2570 			return (EBUSY);
2571 		else if (to->pqname[0] != 0) {
2572 			if ((to->pqid = pf_oqname2qid(to->pqname)) == 0)
2573 				return (EBUSY);
2574 		} else
2575 			to->pqid = to->qid;
2576 	}
2577 #endif
2578 	to->rt_listid = from->rt_listid;
2579 	to->prob = from->prob;
2580 	to->return_icmp = from->return_icmp;
2581 	to->return_icmp6 = from->return_icmp6;
2582 	to->max_mss = from->max_mss;
2583 	if (to->tagname[0])
2584 		if ((to->tag = pf_tagname2tag(to->tagname)) == 0)
2585 			return (EBUSY);
2586 	if (to->match_tagname[0])
2587 		if ((to->match_tag = pf_tagname2tag(to->match_tagname)) == 0)
2588 			return (EBUSY);
2589 	to->scrub_flags = from->scrub_flags;
2590 	to->uid = from->uid;
2591 	to->gid = from->gid;
2592 	to->rule_flag = from->rule_flag;
2593 	to->action = from->action;
2594 	to->direction = from->direction;
2595 	to->log = from->log;
2596 	to->logif = from->logif;
2597 #if NPFLOG > 0
2598 	if (!to->log)
2599 		to->logif = 0;
2600 #endif
2601 	to->quick = from->quick;
2602 	to->ifnot = from->ifnot;
2603 	to->match_tag_not = from->match_tag_not;
2604 	to->keep_state = from->keep_state;
2605 	to->af = from->af;
2606 	to->naf = from->naf;
2607 	to->proto = from->proto;
2608 	to->type = from->type;
2609 	to->code = from->code;
2610 	to->flags = from->flags;
2611 	to->flagset = from->flagset;
2612 	to->min_ttl = from->min_ttl;
2613 	to->allow_opts = from->allow_opts;
2614 	to->rt = from->rt;
2615 	to->return_ttl = from->return_ttl;
2616 	to->tos = from->tos;
2617 	to->set_tos = from->set_tos;
2618 	to->anchor_relative = from->anchor_relative; /* XXX */
2619 	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
2620 	to->flush = from->flush;
2621 	to->divert.addr = from->divert.addr;
2622 	to->divert.port = from->divert.port;
2623 	to->divert_packet.addr = from->divert_packet.addr;
2624 	to->divert_packet.port = from->divert_packet.port;
2625 	to->set_prio[0] = from->set_prio[0];
2626 	to->set_prio[1] = from->set_prio[1];
2627 
2628 	return (0);
2629 }
2630