xref: /openbsd-src/sys/net/pf_ioctl.c (revision 46035553bfdd96e63c94e32da0210227ec2e3cf1)
1 /*	$OpenBSD: pf_ioctl.c,v 1.361 2020/12/16 18:00:44 kn Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/mbuf.h>
45 #include <sys/filio.h>
46 #include <sys/fcntl.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/kernel.h>
50 #include <sys/time.h>
51 #include <sys/timeout.h>
52 #include <sys/pool.h>
53 #include <sys/malloc.h>
54 #include <sys/proc.h>
55 #include <sys/rwlock.h>
56 #include <sys/syslog.h>
57 #include <uvm/uvm_extern.h>
58 
59 #include <crypto/md5.h>
60 
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/route.h>
64 #include <net/hfsc.h>
65 #include <net/fq_codel.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/ip.h>
69 #include <netinet/in_pcb.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_icmp.h>
72 #include <netinet/tcp.h>
73 #include <netinet/udp.h>
74 
75 #ifdef INET6
76 #include <netinet/ip6.h>
77 #include <netinet/icmp6.h>
78 #endif /* INET6 */
79 
80 #include <net/pfvar.h>
81 #include <net/pfvar_priv.h>
82 
83 #if NPFSYNC > 0
84 #include <netinet/ip_ipsp.h>
85 #include <net/if_pfsync.h>
86 #endif /* NPFSYNC > 0 */
87 
/* Pool backing struct pf_tagname allocations (shared by tags and queue ids). */
struct pool		 pf_tag_pl;

/* Local prototypes: ioctl entry points and ruleset/queue helpers. */
void			 pfattach(int);
void			 pf_thread_create(void *);
int			 pfopen(dev_t, int, int, struct proc *);
int			 pfclose(dev_t, int, int, struct proc *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
int			 pf_begin_rules(u_int32_t *, const char *);
int			 pf_rollback_rules(u_int32_t, char *);
void			 pf_remove_queues(void);
int			 pf_commit_queues(void);
void			 pf_free_queues(struct pf_queuehead *);
void			 pf_calc_chksum(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, char *);
int			 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
int			 pf_kif_setup(char *, struct pfi_kif **);
void			 pf_addr_copyout(struct pf_addr_wrap *);
void			 pf_trans_set_commit(void);
void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
int			 pf_validate_range(u_int8_t, u_int16_t[2]);
int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
			    struct pf_ruleset *);
u_int16_t		 pf_qname2qid(char *, int);
void			 pf_qid2qname(u_int16_t, char *);
void			 pf_qid_unref(u_int16_t);

/* Implicit last-match rule, plus the staged copy used while committing. */
struct pf_rule		 pf_default_rule, pf_default_rule_new;

/*
 * Staged copies of global settings; `mask' records which of the
 * PF_TSET_* fields below are pending for pf_trans_set_commit().
 */
struct {
	char		statusif[IFNAMSIZ];
	u_int32_t	debug;
	u_int32_t	hostid;
	u_int32_t	reass;
	u_int32_t	mask;
} pf_trans_set;

#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

/* Highest id tagname2tag() will ever hand out. */
#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#ifdef WITH_PF_LOCK
/*
 * pf_lock protects consistency of PF data structures, which don't have
 * their dedicated lock yet. The pf_lock currently protects:
 *	- rules,
 *	- radix tables,
 *	- source nodes
 * All callers must grab pf_lock exclusively.
 *
 * pf_state_lock protects consistency of state table. Packets, which do state
 * look up grab the lock as readers. If packet must create state, then it must
 * grab the lock as writer. Whenever packet creates state it grabs pf_lock
 * first then it locks pf_state_lock as the writer.
 */
struct rwlock		 pf_lock = RWLOCK_INITIALIZER("pf_lock");
struct rwlock		 pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
#endif /* WITH_PF_LOCK */

/* tag2tagname() copies into PF_TAG_NAME_SIZE buffers for both lists. */
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
164 
/*
 * One-time attach: create the memory pools, initialize the pf
 * sub-modules and global data structures, and set up the implicit
 * default rule and default timeouts.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* Pools for every pf object type, all at IPL_SOFTNET. */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
	    IPL_SOFTNET, 0, "pfrule", NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
	    IPL_SOFTNET, 0, "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
	    IPL_SOFTNET, 0, "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
	    IPL_SOFTNET, 0, "pfstate", NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
	    IPL_SOFTNET, 0, "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
	    IPL_SOFTNET, 0, "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
	    IPL_SOFTNET, 0, "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
	    IPL_SOFTNET, 0, "pfqueue", NULL);
	pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
	    IPL_SOFTNET, 0, "pftag", NULL);
	pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
	    IPL_SOFTNET, 0, "pfpktdelay", NULL);

	/* Bring up the sub-systems pf builds on. */
	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();
	pf_syncookies_init();

	/* Enforce the configured cap on state allocations. */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* Use a smaller table-entry limit on machines with <= 100MB RAM. */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* Two queue lists: one active, one staged for the next commit. */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type =  PF_ADDR_NONE;
	pf_default_rule.nat.addr.type =  PF_ADDR_NONE;
	pf_default_rule.route.addr.type =  PF_ADDR_NONE;

	pf_normalize_init();
	memset(&pf_status, 0, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
255 
256 int
257 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
258 {
259 	if (minor(dev) >= 1)
260 		return (ENXIO);
261 	return (0);
262 }
263 
264 int
265 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
266 {
267 	if (minor(dev) >= 1)
268 		return (ENXIO);
269 	return (0);
270 }
271 
/*
 * Unlink `rule' from `rulequeue' (when non-NULL) and release it.
 * If states or source nodes still reference the rule, it is only
 * unlinked (tqe_prev cleared marks it as such) and left allocated;
 * once the last reference is gone the tags, labels, dynamic
 * addresses, tables and kif references are dropped and the rule is
 * returned to the pool.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as unlinked. */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* Still referenced or still linked: defer the final release. */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	/* Tables were not removed above when called without a queue. */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_remove_anchor(rule);
	pool_put(&pf_rule_pl, rule);
}
324 
/*
 * Remove a single rule from its active ruleset: unlink and release
 * it, renumber the remaining rules, bump the ticket, recompute the
 * skip steps and, for the main ruleset, the ruleset checksum.
 */
void
pf_purge_rule(struct pf_rule *rule)
{
	u_int32_t		 nr = 0;
	struct pf_ruleset	*ruleset;

	KASSERT((rule != NULL) && (rule->ruleset != NULL));
	ruleset = rule->ruleset;

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	/* Renumber the surviving rules sequentially. */
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);
	pf_remove_if_empty_ruleset(ruleset);

	if (ruleset == &pf_main_ruleset)
		pf_calc_chksum(ruleset);
}
345 
/*
 * Look up (and optionally create) the numeric id for `tagname' on
 * the given tag list.  The list is kept sorted by ascending tag id,
 * which lets the allocation loop below find the first unused id.
 * Returns the id with its refcount bumped, or 0 on failure (not
 * found and !create, id space exhausted, or allocation failure).
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	TAILQ_FOREACH(p, head, entries) {
		/* A gap in the sorted ids means new_tagid is free. */
		if (p->tag != new_tagid)
			break;
		new_tagid = p->tag + 1;
	}

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
392 
393 void
394 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
395 {
396 	struct pf_tagname	*tag;
397 
398 	TAILQ_FOREACH(tag, head, entries)
399 		if (tag->tag == tagid) {
400 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
401 			return;
402 		}
403 }
404 
405 void
406 tag_unref(struct pf_tags *head, u_int16_t tag)
407 {
408 	struct pf_tagname	*p, *next;
409 
410 	if (tag == 0)
411 		return;
412 
413 	TAILQ_FOREACH_SAFE(p, head, entries, next) {
414 		if (tag == p->tag) {
415 			if (--p->ref == 0) {
416 				TAILQ_REMOVE(head, p, entries);
417 				pool_put(&pf_tag_pl, p);
418 			}
419 			break;
420 		}
421 	}
422 }
423 
/* Resolve/create a packet tag id on the global pf_tags list. */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}
429 
/* Copy the name of packet tag `tagid' into `p' (PF_TAG_NAME_SIZE). */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
435 
436 void
437 pf_tag_ref(u_int16_t tag)
438 {
439 	struct pf_tagname *t;
440 
441 	TAILQ_FOREACH(t, &pf_tags, entries)
442 		if (t->tag == tag)
443 			break;
444 	if (t != NULL)
445 		t->ref++;
446 }
447 
/* Drop a reference on packet tag `tag'; frees the entry when unused. */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
453 
/*
 * Resolve a route-label name to its id, taking a reference.
 * Returns -1 when the label cannot be resolved, 0 otherwise
 * (including for non-rtlabel address wrappers, which are a no-op).
 */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
	return (0);
}
462 
/* Release the route-label reference held by an rtlabel address wrapper. */
void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
}
469 
470 void
471 pf_rtlabel_copyout(struct pf_addr_wrap *a)
472 {
473 	const char	*name;
474 
475 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
476 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
477 			strlcpy(a->v.rtlabelname, "?",
478 			    sizeof(a->v.rtlabelname));
479 		else
480 			strlcpy(a->v.rtlabelname, name,
481 			    sizeof(a->v.rtlabelname));
482 	}
483 }
484 
/* Resolve/create a queue id for `qname' on the global pf_qids list. */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}
490 
/* Copy the name of queue id `qid' into `p' (PF_QNAME_SIZE buffer). */
void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}
496 
497 void
498 pf_qid_unref(u_int16_t qid)
499 {
500 	tag_unref(&pf_qids, (u_int16_t)qid);
501 }
502 
/*
 * Start a ruleset transaction on `anchor': flush any leftover rules
 * on the inactive list, hand back a fresh ticket and mark the
 * inactive side open for loading.  Returns EINVAL when the ruleset
 * cannot be found or created.
 */
int
pf_begin_rules(u_int32_t *ticket, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	*ticket = ++rs->rules.inactive.ticket;
	rs->rules.inactive.open = 1;
	return (0);
}
519 
/*
 * Abort a ruleset transaction: throw away the staged inactive rules
 * (and, for the main ruleset, the staged queue definitions) and
 * close the transaction.  A stale ticket or unknown anchor is not
 * an error - there is simply nothing to roll back.
 */
int
pf_rollback_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    rs->rules.inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	rs->rules.inactive.open = 0;

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);

	pf_free_queues(pf_queues_inactive);

	return (0);
}
544 
545 void
546 pf_free_queues(struct pf_queuehead *where)
547 {
548 	struct pf_queuespec	*q, *qtmp;
549 
550 	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
551 		TAILQ_REMOVE(where, q, entries);
552 		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
553 		pool_put(&pf_queue_pl, q);
554 	}
555 }
556 
557 void
558 pf_remove_queues(void)
559 {
560 	struct pf_queuespec	*q;
561 	struct ifnet		*ifp;
562 
563 	/* put back interfaces in normal queueing mode */
564 	TAILQ_FOREACH(q, pf_queues_active, entries) {
565 		if (q->parent_qid != 0)
566 			continue;
567 
568 		ifp = q->kif->pfik_ifp;
569 		if (ifp == NULL)
570 			continue;
571 
572 		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
573 	}
574 }
575 
/*
 * Per-interface scratch node used by pf_create_queues() while
 * building the new queue configuration; nodes form a singly
 * linked list via `next'.
 */
struct pf_queue_if {
	struct ifnet		*ifp;		/* interface this config is for */
	const struct ifq_ops	*ifqops;	/* ifqueue ops to attach */
	const struct pfq_ops	*pfqops;	/* pf queue discipline ops */
	void			*disc;		/* discipline private data */
	struct pf_queue_if	*next;		/* next node in the list */
};
583 
584 static inline struct pf_queue_if *
585 pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
586 {
587 	struct pf_queue_if *qif = list;
588 
589 	while (qif != NULL) {
590 		if (qif->ifp == ifp)
591 			return (qif);
592 
593 		qif = qif->next;
594 	}
595 
596 	return (qif);
597 }
598 
/*
 * Instantiate the queue configuration on pf_queues_active:
 *  1. allocate per-interface discipline state for every root queue,
 *  2. add every queue spec to its interface's discipline,
 *  3. restore default queueing on interfaces that had a root queue
 *     in the old (now inactive) config but lost it,
 *  4. attach the new disciplines to the interfaces.
 * On error all allocated discipline state is freed and the error
 * from pfq_addqueue() is returned; nothing has been attached yet at
 * that point.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_queue_if		*list = NULL, *qif;
	int			 error;

	/*
	 * Find root queues and allocate traffic conditioner
	 * private data for these interfaces
	 */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
		qif->ifp = ifp;

		/* Root class present: HFSC; otherwise plain fq-codel. */
		if (q->flags & PFQS_ROOTCLASS) {
			qif->ifqops = ifq_hfsc_ops;
			qif->pfqops = pfq_hfsc_ops;
		} else {
			qif->ifqops = ifq_fqcodel_ops;
			qif->pfqops = pfq_fqcodel_ops;
		}

		qif->disc = qif->pfqops->pfq_alloc(ifp);

		qif->next = list;
		list = qif;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = pf_ifp2q(list, ifp);
		KASSERT(qif != NULL);

		error = qif->pfqops->pfq_addqueue(qif->disc, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* Interface still has a root queue in the new config. */
		qif = pf_ifp2q(list, ifp);
		if (qif != NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		ifp = qif->ifp;

		ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (0);

error:
	/* Unwind: free every discipline allocated in the first pass. */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		qif->pfqops->pfq_free(qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (error);
}
690 
691 int
692 pf_commit_queues(void)
693 {
694 	struct pf_queuehead	*qswap;
695 	int error;
696 
697         /* swap */
698         qswap = pf_queues_active;
699         pf_queues_active = pf_queues_inactive;
700         pf_queues_inactive = qswap;
701 
702 	error = pf_create_queues();
703 	if (error != 0) {
704 		pf_queues_inactive = pf_queues_active;
705 		pf_queues_active = qswap;
706 		return (error);
707 	}
708 
709         pf_free_queues(pf_queues_inactive);
710 
711 	return (0);
712 }
713 
714 const struct pfq_ops *
715 pf_queue_manager(struct pf_queuespec *q)
716 {
717 	if (q->flags & PFQS_FLOWQUEUE)
718 		return pfq_fqcodel_ops;
719 	return (/* pfq_default_ops */ NULL);
720 }
721 
/*
 * Helpers for folding rule fields into the MD5 ruleset checksum.
 * Multi-byte integers are converted to network byte order first so
 * the digest does not depend on host endianness.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
737 
/*
 * Fold one rule address (src or dst) into the ruleset checksum,
 * hashing only the fields relevant to its address type.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			/* Skip auto-generated optimizer table names. */
			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
			    strlen(PF_OPTIMIZER_TABLE_PFX)))
				PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
		case PF_ADDR_RTLABEL:
			PF_MD5_UPD(pfr, addr.v.rtlabelname);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
767 
/*
 * Fold one rule into the ruleset checksum.  `x' and `y' are scratch
 * storage for the byte-order conversions done by the HTONS/HTONL
 * macros.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
807 
/*
 * Commit a ruleset transaction: swap the inactive (staged) rules
 * into place, recompute the skip steps, free the replaced rules and
 * close the transaction.  The main ruleset additionally gets its
 * checksum recomputed (over the staged rules, before the swap) and
 * its queue definitions committed.  Returns EBUSY on a stale ticket
 * or a transaction that was never opened.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	u_int32_t		 old_rcount;

	/* Make sure any expired rules get removed from active rules first. */
	pf_purge_expired_rules();

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	if (rs == &pf_main_ruleset)
		pf_calc_chksum(rs);

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}
852 
/*
 * Compute the MD5 digest of the ruleset's *inactive* (staged) rule
 * list and store it in pf_status.pf_chksum.  An empty list yields
 * the digest of zero bytes of input.
 */
void
pf_calc_chksum(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);

	if (rs->rules.inactive.rcount) {
		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
			pf_hash_rule(&ctx, rule);
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
}
871 
/*
 * Resolve the dynamic-interface, table and route-label parts of an
 * address wrapper when a rule is loaded.  Returns EINVAL if any of
 * the three setups fails.
 */
int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	if (pfi_dynaddr_setup(addr, af) ||
	    pf_tbladdr_setup(ruleset, addr) ||
	    pf_rtlabel_add(addr))
		return (EINVAL);

	return (0);
}
883 
884 int
885 pf_kif_setup(char *ifname, struct pfi_kif **kif)
886 {
887 	if (ifname[0]) {
888 		*kif = pfi_kif_get(ifname);
889 		if (*kif == NULL)
890 			return (EINVAL);
891 
892 		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
893 	} else
894 		*kif = NULL;
895 
896 	return (0);
897 }
898 
/*
 * Prepare an address wrapper for copyout to userland by filling in
 * the dynamic-address, table and route-label text representations.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
906 
907 int
908 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
909 {
910 	int			 error = 0;
911 
912 	/* XXX keep in sync with switch() below */
913 	if (securelevel > 1)
914 		switch (cmd) {
915 		case DIOCGETRULES:
916 		case DIOCGETRULE:
917 		case DIOCGETSTATE:
918 		case DIOCSETSTATUSIF:
919 		case DIOCGETSTATUS:
920 		case DIOCCLRSTATUS:
921 		case DIOCNATLOOK:
922 		case DIOCSETDEBUG:
923 		case DIOCGETSTATES:
924 		case DIOCGETTIMEOUT:
925 		case DIOCGETLIMIT:
926 		case DIOCGETRULESETS:
927 		case DIOCGETRULESET:
928 		case DIOCGETQUEUES:
929 		case DIOCGETQUEUE:
930 		case DIOCGETQSTATS:
931 		case DIOCRGETTABLES:
932 		case DIOCRGETTSTATS:
933 		case DIOCRCLRTSTATS:
934 		case DIOCRCLRADDRS:
935 		case DIOCRADDADDRS:
936 		case DIOCRDELADDRS:
937 		case DIOCRSETADDRS:
938 		case DIOCRGETADDRS:
939 		case DIOCRGETASTATS:
940 		case DIOCRCLRASTATS:
941 		case DIOCRTSTADDRS:
942 		case DIOCOSFPGET:
943 		case DIOCGETSRCNODES:
944 		case DIOCCLRSRCNODES:
945 		case DIOCIGETIFACES:
946 		case DIOCSETIFFLAG:
947 		case DIOCCLRIFFLAG:
948 		case DIOCGETSYNFLWATS:
949 			break;
950 		case DIOCRCLRTABLES:
951 		case DIOCRADDTABLES:
952 		case DIOCRDELTABLES:
953 		case DIOCRSETTFLAGS:
954 			if (((struct pfioc_table *)addr)->pfrio_flags &
955 			    PFR_FLAG_DUMMY)
956 				break; /* dummy operation ok */
957 			return (EPERM);
958 		default:
959 			return (EPERM);
960 		}
961 
962 	if (!(flags & FWRITE))
963 		switch (cmd) {
964 		case DIOCGETRULES:
965 		case DIOCGETSTATE:
966 		case DIOCGETSTATUS:
967 		case DIOCGETSTATES:
968 		case DIOCGETTIMEOUT:
969 		case DIOCGETLIMIT:
970 		case DIOCGETRULESETS:
971 		case DIOCGETRULESET:
972 		case DIOCGETQUEUES:
973 		case DIOCGETQUEUE:
974 		case DIOCGETQSTATS:
975 		case DIOCNATLOOK:
976 		case DIOCRGETTABLES:
977 		case DIOCRGETTSTATS:
978 		case DIOCRGETADDRS:
979 		case DIOCRGETASTATS:
980 		case DIOCRTSTADDRS:
981 		case DIOCOSFPGET:
982 		case DIOCGETSRCNODES:
983 		case DIOCIGETIFACES:
984 		case DIOCGETSYNFLWATS:
985 			break;
986 		case DIOCRCLRTABLES:
987 		case DIOCRADDTABLES:
988 		case DIOCRDELTABLES:
989 		case DIOCRCLRTSTATS:
990 		case DIOCRCLRADDRS:
991 		case DIOCRADDADDRS:
992 		case DIOCRDELADDRS:
993 		case DIOCRSETADDRS:
994 		case DIOCRSETTFLAGS:
995 			if (((struct pfioc_table *)addr)->pfrio_flags &
996 			    PFR_FLAG_DUMMY) {
997 				flags |= FWRITE; /* need write lock for dummy */
998 				break; /* dummy operation ok */
999 			}
1000 			return (EACCES);
1001 		case DIOCGETRULE:
1002 			if (((struct pfioc_rule *)addr)->action ==
1003 			    PF_GET_CLR_CNTR)
1004 				return (EACCES);
1005 			break;
1006 		default:
1007 			return (EACCES);
1008 		}
1009 
1010 	switch (cmd) {
1011 
1012 	case DIOCSTART:
1013 		NET_LOCK();
1014 		PF_LOCK();
1015 		if (pf_status.running)
1016 			error = EEXIST;
1017 		else {
1018 			pf_status.running = 1;
1019 			pf_status.since = getuptime();
1020 			if (pf_status.stateid == 0) {
1021 				pf_status.stateid = gettime();
1022 				pf_status.stateid = pf_status.stateid << 32;
1023 			}
1024 			timeout_add_sec(&pf_purge_to, 1);
1025 			pf_create_queues();
1026 			DPFPRINTF(LOG_NOTICE, "pf: started");
1027 		}
1028 		PF_UNLOCK();
1029 		NET_UNLOCK();
1030 		break;
1031 
1032 	case DIOCSTOP:
1033 		NET_LOCK();
1034 		PF_LOCK();
1035 		if (!pf_status.running)
1036 			error = ENOENT;
1037 		else {
1038 			pf_status.running = 0;
1039 			pf_status.since = getuptime();
1040 			pf_remove_queues();
1041 			DPFPRINTF(LOG_NOTICE, "pf: stopped");
1042 		}
1043 		PF_UNLOCK();
1044 		NET_UNLOCK();
1045 		break;
1046 
1047 	case DIOCGETQUEUES: {
1048 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1049 		struct pf_queuespec	*qs;
1050 		u_int32_t		 nr = 0;
1051 
1052 		NET_LOCK();
1053 		PF_LOCK();
1054 		pq->ticket = pf_main_ruleset.rules.active.ticket;
1055 
1056 		/* save state to not run over them all each time? */
1057 		qs = TAILQ_FIRST(pf_queues_active);
1058 		while (qs != NULL) {
1059 			qs = TAILQ_NEXT(qs, entries);
1060 			nr++;
1061 		}
1062 		pq->nr = nr;
1063 		PF_UNLOCK();
1064 		NET_UNLOCK();
1065 		break;
1066 	}
1067 
1068 	case DIOCGETQUEUE: {
1069 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1070 		struct pf_queuespec	*qs;
1071 		u_int32_t		 nr = 0;
1072 
1073 		NET_LOCK();
1074 		PF_LOCK();
1075 		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1076 			error = EBUSY;
1077 			PF_UNLOCK();
1078 			NET_UNLOCK();
1079 			break;
1080 		}
1081 
1082 		/* save state to not run over them all each time? */
1083 		qs = TAILQ_FIRST(pf_queues_active);
1084 		while ((qs != NULL) && (nr++ < pq->nr))
1085 			qs = TAILQ_NEXT(qs, entries);
1086 		if (qs == NULL) {
1087 			error = EBUSY;
1088 			PF_UNLOCK();
1089 			NET_UNLOCK();
1090 			break;
1091 		}
1092 		memcpy(&pq->queue, qs, sizeof(pq->queue));
1093 		PF_UNLOCK();
1094 		NET_UNLOCK();
1095 		break;
1096 	}
1097 
	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr;
		int			 nbytes;

		NET_LOCK();
		PF_LOCK();
		/* Reject stale snapshots taken before a ruleset swap. */
		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;

		/* save state to not run over them all each time? */
		qs = TAILQ_FIRST(pf_queues_active);
		/* Linear walk to the pq->nr'th active queue. */
		while ((qs != NULL) && (nr++ < pq->nr))
			qs = TAILQ_NEXT(qs, entries);
		if (qs == NULL) {
			/* Index past the end of the list. */
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pq->queue, qs, sizeof(pq->queue));
		/* It's a root flow queue but is not an HFSC root class */
		if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 &&
		    !(qs->flags & PFQS_ROOTCLASS))
			error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		else
			error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		if (error == 0)
			/* Tell userland how many stats bytes were filled in. */
			pq->nbytes = nbytes;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1140 
	case DIOCADDQUEUE: {
		struct pfioc_queue	*q = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;

		/*
		 * Allocate before taking the locks so pool_get() may
		 * sleep; every error path below must pool_put() it back.
		 */
		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (qs == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		/* New queues go into the inactive set of an open transaction. */
		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		memcpy(qs, &q->queue, sizeof(*qs));
		/* Allocate (or look up) a queue id for the name. */
		qs->qid = pf_qname2qid(qs->qname, 1);
		if (qs->qid == 0) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		/* A named parent must already have a qid. */
		if (qs->parent[0] && (qs->parent_qid =
		    pf_qname2qid(qs->parent, 0)) == 0) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		qs->kif = pfi_kif_get(qs->ifname);
		if (qs->kif == NULL) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		/* XXX resolve bw percentage specs */
		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);

		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
		PF_UNLOCK();
		NET_UNLOCK();

		break;
	}
1194 
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;

		/*
		 * Allocate before taking the locks so pool_get() may
		 * sleep; every error path below must release it again.
		 */
		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		/* Force NUL termination on the userland-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		/* High byte of return_icmp is the ICMP type; range check it. */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		/* Rules are staged into the inactive set of a transaction. */
		if (pr->ticket != ruleset->rules.inactive.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
			/*
			 * After copyin the rule may hold references
			 * (tables, kifs, ...); pf_rm_rule() drops them
			 * and frees the rule.
			 */
			pf_rm_rule(NULL, rule);
			rule = NULL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Record creating uid/pid for pfctl -vvsr output. */
		rule->cuid = p->p_ucred->cr_ruid;
		rule->cpid = p->p_p->ps_pid;

		/* Only unspecified, INET and (optionally) INET6 af's allowed. */
		switch (rule->af) {
		case 0:
			break;
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			pf_rm_rule(NULL, rule);
			rule = NULL;
			error = EAFNOSUPPORT;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* Number the rule one past the current inactive tail. */
		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

		if (rule->src.addr.type == PF_ADDR_NONE ||
		    rule->dst.addr.type == PF_ADDR_NONE)
			error = EINVAL;

		/* Errors accumulate; checked once below so cleanup is shared. */
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		/* route-to requires an explicit direction. */
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > IFQ_MAXPRIO ||
		    rule->set_prio[1] > IFQ_MAXPRIO))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
		    rule, entries);
		rule->ruleset = ruleset;
		ruleset->rules.inactive.rcount++;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1302 
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;

		NET_LOCK();
		PF_LOCK();
		/* Force NUL termination on the userland-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Rule count = number of the last rule + 1 (rules are 0-based). */
		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		/* Ticket lets DIOCGETRULE detect ruleset changes in between. */
		pr->ticket = ruleset->rules.active.ticket;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1328 
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 i;

		NET_LOCK();
		PF_LOCK();
		/* Force NUL termination on the userland-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Reject tickets from before the last ruleset swap. */
		if (pr->ticket != ruleset->rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Linear search for the rule numbered pr->nr. */
		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pr->rule, rule, sizeof(struct pf_rule));
		/*
		 * Scrub all kernel pointers out of the copy before it
		 * is returned to userland.
		 */
		memset(&pr->rule.entries, 0, sizeof(pr->rule.entries));
		pr->rule.kif = NULL;
		pr->rule.nat.kif = NULL;
		pr->rule.rdr.kif = NULL;
		pr->rule.route.kif = NULL;
		pr->rule.rcv_kif = NULL;
		pr->rule.anchor = NULL;
		pr->rule.overload_tbl = NULL;
		/* Undo the internal fixed-point scaling of the rate limit. */
		pr->rule.pktrate.limit /= PF_THRESHOLD_MULT;
		memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle));
		pr->rule.ruleset = NULL;
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		pf_addr_copyout(&pr->rule.rdr.addr);
		pf_addr_copyout(&pr->rule.nat.addr);
		pf_addr_copyout(&pr->rule.route.addr);
		/* Convert skip-step pointers into rule numbers (-1 = none). */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = (u_int32_t)-1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		/* Optionally clear the rule's counters after reading. */
		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			rule->states_tot = 0;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1400 
1401 	case DIOCCHANGERULE: {
1402 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1403 		struct pf_ruleset	*ruleset;
1404 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1405 		u_int32_t		 nr = 0;
1406 
1407 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1408 		    pcr->action > PF_CHANGE_GET_TICKET) {
1409 			error = EINVAL;
1410 			break;
1411 		}
1412 
1413 		newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1414 		if (newrule == NULL) {
1415 			error = ENOMEM;
1416 			break;
1417 		}
1418 
1419 		NET_LOCK();
1420 		PF_LOCK();
1421 		ruleset = pf_find_ruleset(pcr->anchor);
1422 		if (ruleset == NULL) {
1423 			error = EINVAL;
1424 			PF_UNLOCK();
1425 			NET_UNLOCK();
1426 			pool_put(&pf_rule_pl, newrule);
1427 			break;
1428 		}
1429 
1430 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1431 			pcr->ticket = ++ruleset->rules.active.ticket;
1432 			PF_UNLOCK();
1433 			NET_UNLOCK();
1434 			pool_put(&pf_rule_pl, newrule);
1435 			break;
1436 		} else {
1437 			if (pcr->ticket !=
1438 			    ruleset->rules.active.ticket) {
1439 				error = EINVAL;
1440 				PF_UNLOCK();
1441 				NET_UNLOCK();
1442 				pool_put(&pf_rule_pl, newrule);
1443 				break;
1444 			}
1445 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1446 				error = EINVAL;
1447 				PF_UNLOCK();
1448 				NET_UNLOCK();
1449 				pool_put(&pf_rule_pl, newrule);
1450 				break;
1451 			}
1452 		}
1453 
1454 		if (pcr->action != PF_CHANGE_REMOVE) {
1455 			pf_rule_copyin(&pcr->rule, newrule, ruleset);
1456 			newrule->cuid = p->p_ucred->cr_ruid;
1457 			newrule->cpid = p->p_p->ps_pid;
1458 
1459 			switch (newrule->af) {
1460 			case 0:
1461 				break;
1462 			case AF_INET:
1463 				break;
1464 #ifdef INET6
1465 			case AF_INET6:
1466 				break;
1467 #endif /* INET6 */
1468 			default:
1469 				pf_rm_rule(NULL, newrule);
1470 				error = EAFNOSUPPORT;
1471 				PF_UNLOCK();
1472 				NET_UNLOCK();
1473 				goto fail;
1474 			}
1475 
1476 			if (newrule->rt && !newrule->direction)
1477 				error = EINVAL;
1478 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1479 				error = EINVAL;
1480 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1481 				error = EINVAL;
1482 			if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
1483 				error = EINVAL;
1484 			if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
1485 				error = EINVAL;
1486 			if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
1487 				error = EINVAL;
1488 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1489 				error = EINVAL;
1490 
1491 			if (error) {
1492 				pf_rm_rule(NULL, newrule);
1493 				PF_UNLOCK();
1494 				NET_UNLOCK();
1495 				break;
1496 			}
1497 		}
1498 
1499 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1500 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1501 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1502 			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1503 			    pf_rulequeue);
1504 		else {
1505 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1506 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1507 				oldrule = TAILQ_NEXT(oldrule, entries);
1508 			if (oldrule == NULL) {
1509 				if (newrule != NULL)
1510 					pf_rm_rule(NULL, newrule);
1511 				error = EINVAL;
1512 				PF_UNLOCK();
1513 				NET_UNLOCK();
1514 				break;
1515 			}
1516 		}
1517 
1518 		if (pcr->action == PF_CHANGE_REMOVE) {
1519 			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1520 			ruleset->rules.active.rcount--;
1521 		} else {
1522 			if (oldrule == NULL)
1523 				TAILQ_INSERT_TAIL(
1524 				    ruleset->rules.active.ptr,
1525 				    newrule, entries);
1526 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1527 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1528 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1529 			else
1530 				TAILQ_INSERT_AFTER(
1531 				    ruleset->rules.active.ptr,
1532 				    oldrule, newrule, entries);
1533 			ruleset->rules.active.rcount++;
1534 		}
1535 
1536 		nr = 0;
1537 		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1538 			oldrule->nr = nr++;
1539 
1540 		ruleset->rules.active.ticket++;
1541 
1542 		pf_calc_skip_steps(ruleset->rules.active.ptr);
1543 		pf_remove_if_empty_ruleset(ruleset);
1544 
1545 		PF_UNLOCK();
1546 		NET_UNLOCK();
1547 		break;
1548 	}
1549 
	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 killed = 0;

		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		/*
		 * Remove every state, or only those on the named
		 * interface if psk_ifname is set.  nexts is fetched
		 * before removal so the walk survives the unlink.
		 */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC > 0
				/* don't send out individual delete messages */
				SET(s->state_flags, PFSTATE_NOSYNC);
#endif	/* NPFSYNC > 0 */
				pf_remove_state(s);
				killed++;
			}
		}
		PF_STATE_EXIT_WRITE();
		psk->psk_killed = killed;
#if NPFSYNC > 0
		/* One bulk clear message instead of per-state deletes. */
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif	/* NPFSYNC > 0 */
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1580 
	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_item	*si, *sit;
		struct pf_state_key	*sk, key;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;
		const int 		 dirs[] = { PF_IN, PF_OUT };
		int			 sidx, didx;

		/* Fast path 1: kill a single state by id/creatorid. */
		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = pf_status.hostid;
			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
				pf_remove_state(s);
				psk->psk_killed = 1;
			}
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/*
		 * Fast path 2: af, proto and exact src/dst ports given,
		 * so the state key can be looked up directly instead of
		 * scanning the whole state table.
		 */
		if (psk->psk_af && psk->psk_proto &&
		    psk->psk_src.port_op == PF_OP_EQ &&
		    psk->psk_dst.port_op == PF_OP_EQ) {

			key.af = psk->psk_af;
			key.proto = psk->psk_proto;
			key.rdomain = psk->psk_rdomain;

			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			/* Try both directions: swap src/dst indices for PF_OUT. */
			for (i = 0; i < nitems(dirs); i++) {
				if (dirs[i] == PF_IN) {
					sidx = 0;
					didx = 1;
				} else {
					sidx = 1;
					didx = 0;
				}
				pf_addrcpy(&key.addr[sidx],
				    &psk->psk_src.addr.v.a.addr, key.af);
				pf_addrcpy(&key.addr[didx],
				    &psk->psk_dst.addr.v.a.addr, key.af);
				key.port[sidx] = psk->psk_src.port[0];
				key.port[didx] = psk->psk_dst.port[0];

				sk = RB_FIND(pf_state_tree, &pf_statetbl, &key);
				if (sk == NULL)
					continue;

				/*
				 * Kill matching states hanging off this key.
				 * For same-af states the key must be on the
				 * expected side (wire for in, stack for out);
				 * for af-translated (NAT64-style) states any
				 * key match counts, inbound only.  An ifname
				 * filter, if given, must also match.
				 */
				TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit)
					if (((si->s->key[PF_SK_WIRE]->af ==
					    si->s->key[PF_SK_STACK]->af &&
					    sk == (dirs[i] == PF_IN ?
					    si->s->key[PF_SK_WIRE] :
					    si->s->key[PF_SK_STACK])) ||
					    (si->s->key[PF_SK_WIRE]->af !=
					    si->s->key[PF_SK_STACK]->af &&
					    dirs[i] == PF_IN &&
					    (sk == si->s->key[PF_SK_STACK] ||
					    sk == si->s->key[PF_SK_WIRE]))) &&
					    (!psk->psk_ifname[0] ||
					    (si->s->kif != pfi_all &&
					    !strcmp(psk->psk_ifname,
					    si->s->kif->pfik_name)))) {
						pf_remove_state(si->s);
						killed++;
					}
			}
			/*
			 * NOTE(review): psk_killed is left untouched when
			 * nothing matched here, unlike the slow path below
			 * which always writes it -- confirm intended.
			 */
			if (killed)
				psk->psk_killed = killed;
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* Slow path: scan all states and apply every given filter. */
		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			/*
			 * Pick the key that represents the packet's view:
			 * stack side (addresses reversed) for outbound,
			 * wire side for inbound.
			 */
			if (s->direction == PF_OUT) {
				sk = s->key[PF_SK_STACK];
				srcaddr = &sk->addr[1];
				dstaddr = &sk->addr[0];
				srcport = sk->port[1];
				dstport = sk->port[0];
			} else {
				sk = s->key[PF_SK_WIRE];
				srcaddr = &sk->addr[0];
				dstaddr = &sk->addr[1];
				srcport = sk->port[0];
				dstport = sk->port[1];
			}
			/* All criteria are optional; unset ones match anything. */
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
			    pf_match_addr(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    srcaddr, sk->af) &&
			    pf_match_addr(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    dstaddr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    srcport)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dstport)) &&
			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
				pf_remove_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
		PF_STATE_EXIT_WRITE();
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1718 
#if NPFSYNC > 0
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		/* Range-check the userland-supplied timeout index. */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		/* Reuse the pfsync import path to install the state. */
		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
		PF_STATE_EXIT_WRITE();
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
#endif	/* NPFSYNC > 0 */
1738 
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		memset(&id_key, 0, sizeof(id_key));
		id_key.id = ps->state.id;
		id_key.creatorid = ps->state.creatorid;

		NET_LOCK();
		PF_STATE_ENTER_READ();
		s = pf_find_state_byid(&id_key);
		/* Take a reference so the state survives dropping the locks. */
		s = pf_state_ref(s);
		PF_STATE_EXIT_READ();
		NET_UNLOCK();
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		/* Export outside the locks; the ref keeps s valid. */
		pf_state_export(&ps->state, s);
		pf_state_unref(s);
		break;
	}
1763 
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		/* Size query: report the space needed for all states. */
		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		/* Bounce buffer: export here, then copyout one state at a time. */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		NET_LOCK();
		PF_STATE_ENTER_READ();
		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* Skip states already unlinked but not yet freed. */
			if (state->timeout != PFTM_UNLINKED) {
				/* Stop when the userland buffer is full. */
				if ((nr+1) * sizeof(*p) > ps->ps_len)
					break;
				pf_state_export(pstore, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP, sizeof(*pstore));
					PF_STATE_EXIT_READ();
					NET_UNLOCK();
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}
		PF_STATE_EXIT_READ();
		NET_UNLOCK();

		/* Report how many bytes were actually written. */
		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP, sizeof(*pstore));
		break;
	}
1808 
	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		NET_LOCK();
		PF_LOCK();
		/* Snapshot global status, then merge per-interface counters. */
		memcpy(s, &pf_status, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1819 
	case DIOCSETSTATUSIF: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		/* Empty name clears the status interface immediately. */
		if (pi->pfiio_name[0] == 0) {
			memset(pf_status.ifname, 0, IFNAMSIZ);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/*
		 * A non-empty name is only staged in pf_trans_set;
		 * it takes effect when the transaction is committed.
		 */
		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
		pf_trans_set.mask |= PF_TSET_STATUSIF;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1837 
	case DIOCCLRSTATUS: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		/* if ifname is specified, clear counters there only */
		if (pi->pfiio_name[0]) {
			pfi_update_status(pi->pfiio_name, NULL);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* Otherwise reset the global counters and the uptime base. */
		memset(pf_status.counters, 0, sizeof(pf_status.counters));
		memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
		memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
		pf_status.since = getuptime();

		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1860 
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* Only INET and (optionally) INET6 lookups are supported. */
		switch (pnl->af) {
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			error = EAFNOSUPPORT;
			goto fail;
		}

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/* proto, both addresses and (for TCP/UDP) both ports required. */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)) ||
		    pnl->rdomain > RT_TABLEID_MAX)
			error = EINVAL;
		else {
			key.af = pnl->af;
			key.proto = pnl->proto;
			key.rdomain = pnl->rdomain;
			pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			NET_LOCK();
			PF_STATE_ENTER_READ();
			/* m counts matches; >1 means the lookup is ambiguous. */
			state = pf_find_state_all(&key, direction, &m);
			/* Hold a ref so the state survives dropping the locks. */
			state = pf_state_ref(state);
			PF_STATE_EXIT_READ();
			NET_UNLOCK();

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* Return the translated addresses/ports. */
				sk = state->key[sidx];
				pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx],
				    sk->af);
				pnl->rsport = sk->port[sidx];
				pf_addrcpy(&pnl->rdaddr, &sk->addr[didx],
				    sk->af);
				pnl->rdport = sk->port[didx];
				pnl->rrdomain = sk->rdomain;
			} else
				error = ENOENT;
			pf_state_unref(state);
		}
		break;
	}
1926 
	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		/* The purge interval must never be zero. */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		/*
		 * Stage the new value in pf_default_rule_new (applied on
		 * commit) and return the currently active value.
		 */
		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1945 
	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		/* Range-check the timeout index before using it. */
		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1960 
	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		/* Range-check the limit index before using it. */
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1975 
	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		NET_LOCK();
		PF_LOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* Refuse to shrink below the number of items currently in use. */
		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
		    pl->limit) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* Fragments reference mbuf clusters. */
		if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		/*
		 * Stage the new limit (applied on commit) and return the
		 * currently active one.
		 */
		pf_pool_limits[pl->index].limit_new = pl->limit;
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2009 
	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		NET_LOCK();
		PF_LOCK();
		/* Staged in pf_trans_set; takes effect on commit. */
		pf_trans_set.debug = *level;
		pf_trans_set.mask |= PF_TSET_DEBUG;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2021 
	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		NET_LOCK();
		PF_LOCK();
		/* Force NUL termination on the userland-supplied path. */
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Count the direct child anchors of the given ruleset. */
		pr->nr = 0;
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2051 
	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		/* Force NUL termination on the userland-supplied path. */
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Return the name of the pr->nr'th child anchor, if any. */
		pr->name[0] = '\0';
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		PF_UNLOCK();
		NET_UNLOCK();
		/* Empty name means the index was out of range. */
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
2091 
	/*
	 * Table ioctls: thin wrappers around the pfr_* API.  Each one
	 * first validates the element size userland compiled against
	 * (pfrio_esize) to catch ABI mismatches, then calls into
	 * pf_table.c under NET_LOCK/PF_LOCK with PFR_FLAG_USERIOCTL
	 * marking the call as coming from userland.
	 */

	/* Delete all tables (no elements, so esize must be 0). */
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Create the tables listed in the buffer. */
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Delete the tables listed in the buffer. */
	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* List tables; pfrio_size is updated to the number available. */
	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Get per-table statistics. */
	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Zero the statistics of the listed tables. */
	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Set/clear flags (e.g. const, persist) on the listed tables. */
	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2204 
	/*
	 * Table-address ioctls: same pattern as the table ioctls above --
	 * validate pfrio_esize against the expected element type, then
	 * call the matching pfr_* function under NET_LOCK/PF_LOCK.
	 */

	/* Remove all addresses from a table (no elements, esize 0). */
	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Add the listed addresses to a table. */
	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Delete the listed addresses from a table. */
	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Replace a table's contents with the listed addresses. */
	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* List a table's addresses; pfrio_size is updated. */
	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Get per-address statistics for a table. */
	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Zero the statistics of the listed addresses. */
	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2321 
2322 	case DIOCRTSTADDRS: {
2323 		struct pfioc_table *io = (struct pfioc_table *)addr;
2324 
2325 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2326 			error = ENODEV;
2327 			break;
2328 		}
2329 		NET_LOCK();
2330 		PF_LOCK();
2331 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2332 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2333 		    PFR_FLAG_USERIOCTL);
2334 		PF_UNLOCK();
2335 		NET_UNLOCK();
2336 		break;
2337 	}
2338 
2339 	case DIOCRINADEFINE: {
2340 		struct pfioc_table *io = (struct pfioc_table *)addr;
2341 
2342 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2343 			error = ENODEV;
2344 			break;
2345 		}
2346 		NET_LOCK();
2347 		PF_LOCK();
2348 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2349 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2350 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2351 		PF_UNLOCK();
2352 		NET_UNLOCK();
2353 		break;
2354 	}
2355 
2356 	case DIOCOSFPADD: {
2357 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2358 		error = pf_osfp_add(io);
2359 		break;
2360 	}
2361 
2362 	case DIOCOSFPGET: {
2363 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2364 		error = pf_osfp_get(io);
2365 		break;
2366 	}
2367 
2368 	case DIOCXBEGIN: {
2369 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2370 		struct pfioc_trans_e	*ioe;
2371 		struct pfr_table	*table;
2372 		int			 i;
2373 
2374 		if (io->esize != sizeof(*ioe)) {
2375 			error = ENODEV;
2376 			goto fail;
2377 		}
2378 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2379 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2380 		NET_LOCK();
2381 		PF_LOCK();
2382 		pf_default_rule_new = pf_default_rule;
2383 		memset(&pf_trans_set, 0, sizeof(pf_trans_set));
2384 		for (i = 0; i < io->size; i++) {
2385 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2386 				PF_UNLOCK();
2387 				NET_UNLOCK();
2388 				free(table, M_TEMP, sizeof(*table));
2389 				free(ioe, M_TEMP, sizeof(*ioe));
2390 				error = EFAULT;
2391 				goto fail;
2392 			}
2393 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2394 			    sizeof(ioe->anchor)) {
2395 				PF_UNLOCK();
2396 				NET_UNLOCK();
2397 				free(table, M_TEMP, sizeof(*table));
2398 				free(ioe, M_TEMP, sizeof(*ioe));
2399 				error = ENAMETOOLONG;
2400 				goto fail;
2401 			}
2402 			switch (ioe->type) {
2403 			case PF_TRANS_TABLE:
2404 				memset(table, 0, sizeof(*table));
2405 				strlcpy(table->pfrt_anchor, ioe->anchor,
2406 				    sizeof(table->pfrt_anchor));
2407 				if ((error = pfr_ina_begin(table,
2408 				    &ioe->ticket, NULL, 0))) {
2409 					PF_UNLOCK();
2410 					NET_UNLOCK();
2411 					free(table, M_TEMP, sizeof(*table));
2412 					free(ioe, M_TEMP, sizeof(*ioe));
2413 					goto fail;
2414 				}
2415 				break;
2416 			case PF_TRANS_RULESET:
2417 				if ((error = pf_begin_rules(&ioe->ticket,
2418 				    ioe->anchor))) {
2419 					PF_UNLOCK();
2420 					NET_UNLOCK();
2421 					free(table, M_TEMP, sizeof(*table));
2422 					free(ioe, M_TEMP, sizeof(*ioe));
2423 					goto fail;
2424 				}
2425 				break;
2426 			default:
2427 				PF_UNLOCK();
2428 				NET_UNLOCK();
2429 				free(table, M_TEMP, sizeof(*table));
2430 				free(ioe, M_TEMP, sizeof(*ioe));
2431 				error = EINVAL;
2432 				goto fail;
2433 			}
2434 			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2435 				PF_UNLOCK();
2436 				NET_UNLOCK();
2437 				free(table, M_TEMP, sizeof(*table));
2438 				free(ioe, M_TEMP, sizeof(*ioe));
2439 				error = EFAULT;
2440 				goto fail;
2441 			}
2442 		}
2443 		PF_UNLOCK();
2444 		NET_UNLOCK();
2445 		free(table, M_TEMP, sizeof(*table));
2446 		free(ioe, M_TEMP, sizeof(*ioe));
2447 		break;
2448 	}
2449 
2450 	case DIOCXROLLBACK: {
2451 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2452 		struct pfioc_trans_e	*ioe;
2453 		struct pfr_table	*table;
2454 		int			 i;
2455 
2456 		if (io->esize != sizeof(*ioe)) {
2457 			error = ENODEV;
2458 			goto fail;
2459 		}
2460 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2461 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2462 		NET_LOCK();
2463 		PF_LOCK();
2464 		for (i = 0; i < io->size; i++) {
2465 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2466 				PF_UNLOCK();
2467 				NET_UNLOCK();
2468 				free(table, M_TEMP, sizeof(*table));
2469 				free(ioe, M_TEMP, sizeof(*ioe));
2470 				error = EFAULT;
2471 				goto fail;
2472 			}
2473 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2474 			    sizeof(ioe->anchor)) {
2475 				PF_UNLOCK();
2476 				NET_UNLOCK();
2477 				free(table, M_TEMP, sizeof(*table));
2478 				free(ioe, M_TEMP, sizeof(*ioe));
2479 				error = ENAMETOOLONG;
2480 				goto fail;
2481 			}
2482 			switch (ioe->type) {
2483 			case PF_TRANS_TABLE:
2484 				memset(table, 0, sizeof(*table));
2485 				strlcpy(table->pfrt_anchor, ioe->anchor,
2486 				    sizeof(table->pfrt_anchor));
2487 				if ((error = pfr_ina_rollback(table,
2488 				    ioe->ticket, NULL, 0))) {
2489 					PF_UNLOCK();
2490 					NET_UNLOCK();
2491 					free(table, M_TEMP, sizeof(*table));
2492 					free(ioe, M_TEMP, sizeof(*ioe));
2493 					goto fail; /* really bad */
2494 				}
2495 				break;
2496 			case PF_TRANS_RULESET:
2497 				if ((error = pf_rollback_rules(ioe->ticket,
2498 				    ioe->anchor))) {
2499 					PF_UNLOCK();
2500 					NET_UNLOCK();
2501 					free(table, M_TEMP, sizeof(*table));
2502 					free(ioe, M_TEMP, sizeof(*ioe));
2503 					goto fail; /* really bad */
2504 				}
2505 				break;
2506 			default:
2507 				PF_UNLOCK();
2508 				NET_UNLOCK();
2509 				free(table, M_TEMP, sizeof(*table));
2510 				free(ioe, M_TEMP, sizeof(*ioe));
2511 				error = EINVAL;
2512 				goto fail; /* really bad */
2513 			}
2514 		}
2515 		PF_UNLOCK();
2516 		NET_UNLOCK();
2517 		free(table, M_TEMP, sizeof(*table));
2518 		free(ioe, M_TEMP, sizeof(*ioe));
2519 		break;
2520 	}
2521 
2522 	case DIOCXCOMMIT: {
2523 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2524 		struct pfioc_trans_e	*ioe;
2525 		struct pfr_table	*table;
2526 		struct pf_ruleset	*rs;
2527 		int			 i;
2528 
2529 		if (io->esize != sizeof(*ioe)) {
2530 			error = ENODEV;
2531 			goto fail;
2532 		}
2533 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2534 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2535 		NET_LOCK();
2536 		PF_LOCK();
2537 		/* first makes sure everything will succeed */
2538 		for (i = 0; i < io->size; i++) {
2539 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2540 				PF_UNLOCK();
2541 				NET_UNLOCK();
2542 				free(table, M_TEMP, sizeof(*table));
2543 				free(ioe, M_TEMP, sizeof(*ioe));
2544 				error = EFAULT;
2545 				goto fail;
2546 			}
2547 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2548 			    sizeof(ioe->anchor)) {
2549 				PF_UNLOCK();
2550 				NET_UNLOCK();
2551 				free(table, M_TEMP, sizeof(*table));
2552 				free(ioe, M_TEMP, sizeof(*ioe));
2553 				error = ENAMETOOLONG;
2554 				goto fail;
2555 			}
2556 			switch (ioe->type) {
2557 			case PF_TRANS_TABLE:
2558 				rs = pf_find_ruleset(ioe->anchor);
2559 				if (rs == NULL || !rs->topen || ioe->ticket !=
2560 				     rs->tticket) {
2561 					PF_UNLOCK();
2562 					NET_UNLOCK();
2563 					free(table, M_TEMP, sizeof(*table));
2564 					free(ioe, M_TEMP, sizeof(*ioe));
2565 					error = EBUSY;
2566 					goto fail;
2567 				}
2568 				break;
2569 			case PF_TRANS_RULESET:
2570 				rs = pf_find_ruleset(ioe->anchor);
2571 				if (rs == NULL ||
2572 				    !rs->rules.inactive.open ||
2573 				    rs->rules.inactive.ticket !=
2574 				    ioe->ticket) {
2575 					PF_UNLOCK();
2576 					NET_UNLOCK();
2577 					free(table, M_TEMP, sizeof(*table));
2578 					free(ioe, M_TEMP, sizeof(*ioe));
2579 					error = EBUSY;
2580 					goto fail;
2581 				}
2582 				break;
2583 			default:
2584 				PF_UNLOCK();
2585 				NET_UNLOCK();
2586 				free(table, M_TEMP, sizeof(*table));
2587 				free(ioe, M_TEMP, sizeof(*ioe));
2588 				error = EINVAL;
2589 				goto fail;
2590 			}
2591 		}
2592 
2593 		/*
2594 		 * Checked already in DIOCSETLIMIT, but check again as the
2595 		 * situation might have changed.
2596 		 */
2597 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2598 			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2599 			    pf_pool_limits[i].limit_new) {
2600 				PF_UNLOCK();
2601 				NET_UNLOCK();
2602 				free(table, M_TEMP, sizeof(*table));
2603 				free(ioe, M_TEMP, sizeof(*ioe));
2604 				error = EBUSY;
2605 				goto fail;
2606 			}
2607 		}
2608 		/* now do the commit - no errors should happen here */
2609 		for (i = 0; i < io->size; i++) {
2610 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2611 				PF_UNLOCK();
2612 				NET_UNLOCK();
2613 				free(table, M_TEMP, sizeof(*table));
2614 				free(ioe, M_TEMP, sizeof(*ioe));
2615 				error = EFAULT;
2616 				goto fail;
2617 			}
2618 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2619 			    sizeof(ioe->anchor)) {
2620 				PF_UNLOCK();
2621 				NET_UNLOCK();
2622 				free(table, M_TEMP, sizeof(*table));
2623 				free(ioe, M_TEMP, sizeof(*ioe));
2624 				error = ENAMETOOLONG;
2625 				goto fail;
2626 			}
2627 			switch (ioe->type) {
2628 			case PF_TRANS_TABLE:
2629 				memset(table, 0, sizeof(*table));
2630 				strlcpy(table->pfrt_anchor, ioe->anchor,
2631 				    sizeof(table->pfrt_anchor));
2632 				if ((error = pfr_ina_commit(table, ioe->ticket,
2633 				    NULL, NULL, 0))) {
2634 					PF_UNLOCK();
2635 					NET_UNLOCK();
2636 					free(table, M_TEMP, sizeof(*table));
2637 					free(ioe, M_TEMP, sizeof(*ioe));
2638 					goto fail; /* really bad */
2639 				}
2640 				break;
2641 			case PF_TRANS_RULESET:
2642 				if ((error = pf_commit_rules(ioe->ticket,
2643 				    ioe->anchor))) {
2644 					PF_UNLOCK();
2645 					NET_UNLOCK();
2646 					free(table, M_TEMP, sizeof(*table));
2647 					free(ioe, M_TEMP, sizeof(*ioe));
2648 					goto fail; /* really bad */
2649 				}
2650 				break;
2651 			default:
2652 				PF_UNLOCK();
2653 				NET_UNLOCK();
2654 				free(table, M_TEMP, sizeof(*table));
2655 				free(ioe, M_TEMP, sizeof(*ioe));
2656 				error = EINVAL;
2657 				goto fail; /* really bad */
2658 			}
2659 		}
2660 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2661 			if (pf_pool_limits[i].limit_new !=
2662 			    pf_pool_limits[i].limit &&
2663 			    pool_sethardlimit(pf_pool_limits[i].pp,
2664 			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2665 				PF_UNLOCK();
2666 				NET_UNLOCK();
2667 				free(table, M_TEMP, sizeof(*table));
2668 				free(ioe, M_TEMP, sizeof(*ioe));
2669 				error = EBUSY;
2670 				goto fail; /* really bad */
2671 			}
2672 			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2673 		}
2674 		for (i = 0; i < PFTM_MAX; i++) {
2675 			int old = pf_default_rule.timeout[i];
2676 
2677 			pf_default_rule.timeout[i] =
2678 			    pf_default_rule_new.timeout[i];
2679 			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2680 			    pf_default_rule.timeout[i] < old)
2681 				task_add(net_tq(0), &pf_purge_task);
2682 		}
2683 		pfi_xcommit();
2684 		pf_trans_set_commit();
2685 		PF_UNLOCK();
2686 		NET_UNLOCK();
2687 		free(table, M_TEMP, sizeof(*table));
2688 		free(ioe, M_TEMP, sizeof(*ioe));
2689 		break;
2690 	}
2691 
2692 	case DIOCGETSRCNODES: {
2693 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2694 		struct pf_src_node	*n, *p, *pstore;
2695 		u_int32_t		 nr = 0;
2696 		size_t			 space = psn->psn_len;
2697 
2698 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2699 
2700 		NET_LOCK();
2701 		PF_LOCK();
2702 		if (space == 0) {
2703 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2704 				nr++;
2705 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2706 			PF_UNLOCK();
2707 			NET_UNLOCK();
2708 			free(pstore, M_TEMP, sizeof(*pstore));
2709 			break;
2710 		}
2711 
2712 		p = psn->psn_src_nodes;
2713 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2714 			int	secs = getuptime(), diff;
2715 
2716 			if ((nr + 1) * sizeof(*p) > psn->psn_len)
2717 				break;
2718 
2719 			memcpy(pstore, n, sizeof(*pstore));
2720 			memset(&pstore->entry, 0, sizeof(pstore->entry));
2721 			pstore->rule.ptr = NULL;
2722 			pstore->kif = NULL;
2723 			pstore->rule.nr = n->rule.ptr->nr;
2724 			pstore->creation = secs - pstore->creation;
2725 			if (pstore->expire > secs)
2726 				pstore->expire -= secs;
2727 			else
2728 				pstore->expire = 0;
2729 
2730 			/* adjust the connection rate estimate */
2731 			diff = secs - n->conn_rate.last;
2732 			if (diff >= n->conn_rate.seconds)
2733 				pstore->conn_rate.count = 0;
2734 			else
2735 				pstore->conn_rate.count -=
2736 				    n->conn_rate.count * diff /
2737 				    n->conn_rate.seconds;
2738 
2739 			error = copyout(pstore, p, sizeof(*p));
2740 			if (error) {
2741 				PF_UNLOCK();
2742 				NET_UNLOCK();
2743 				free(pstore, M_TEMP, sizeof(*pstore));
2744 				goto fail;
2745 			}
2746 			p++;
2747 			nr++;
2748 		}
2749 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2750 
2751 		PF_UNLOCK();
2752 		NET_UNLOCK();
2753 		free(pstore, M_TEMP, sizeof(*pstore));
2754 		break;
2755 	}
2756 
2757 	case DIOCCLRSRCNODES: {
2758 		struct pf_src_node	*n;
2759 		struct pf_state		*state;
2760 
2761 		NET_LOCK();
2762 		PF_LOCK();
2763 		PF_STATE_ENTER_WRITE();
2764 		RB_FOREACH(state, pf_state_tree_id, &tree_id)
2765 			pf_src_tree_remove_state(state);
2766 		PF_STATE_EXIT_WRITE();
2767 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2768 			n->expire = 1;
2769 		pf_purge_expired_src_nodes();
2770 		PF_UNLOCK();
2771 		NET_UNLOCK();
2772 		break;
2773 	}
2774 
2775 	case DIOCKILLSRCNODES: {
2776 		struct pf_src_node	*sn;
2777 		struct pf_state		*s;
2778 		struct pfioc_src_node_kill *psnk =
2779 		    (struct pfioc_src_node_kill *)addr;
2780 		u_int			killed = 0;
2781 
2782 		NET_LOCK();
2783 		PF_LOCK();
2784 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2785 			if (pf_match_addr(psnk->psnk_src.neg,
2786 				&psnk->psnk_src.addr.v.a.addr,
2787 				&psnk->psnk_src.addr.v.a.mask,
2788 				&sn->addr, sn->af) &&
2789 			    pf_match_addr(psnk->psnk_dst.neg,
2790 				&psnk->psnk_dst.addr.v.a.addr,
2791 				&psnk->psnk_dst.addr.v.a.mask,
2792 				&sn->raddr, sn->af)) {
2793 				/* Handle state to src_node linkage */
2794 				if (sn->states != 0) {
2795 					PF_ASSERT_LOCKED();
2796 					PF_STATE_ENTER_WRITE();
2797 					RB_FOREACH(s, pf_state_tree_id,
2798 					   &tree_id)
2799 						pf_state_rm_src_node(s, sn);
2800 					PF_STATE_EXIT_WRITE();
2801 				}
2802 				sn->expire = 1;
2803 				killed++;
2804 			}
2805 		}
2806 
2807 		if (killed > 0)
2808 			pf_purge_expired_src_nodes();
2809 
2810 		psnk->psnk_killed = killed;
2811 		PF_UNLOCK();
2812 		NET_UNLOCK();
2813 		break;
2814 	}
2815 
2816 	case DIOCSETHOSTID: {
2817 		u_int32_t	*hostid = (u_int32_t *)addr;
2818 
2819 		NET_LOCK();
2820 		PF_LOCK();
2821 		if (*hostid == 0)
2822 			pf_trans_set.hostid = arc4random();
2823 		else
2824 			pf_trans_set.hostid = *hostid;
2825 		pf_trans_set.mask |= PF_TSET_HOSTID;
2826 		PF_UNLOCK();
2827 		NET_UNLOCK();
2828 		break;
2829 	}
2830 
2831 	case DIOCOSFPFLUSH:
2832 		pf_osfp_flush();
2833 		break;
2834 
2835 	case DIOCIGETIFACES: {
2836 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2837 
2838 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2839 			error = ENODEV;
2840 			break;
2841 		}
2842 		NET_LOCK();
2843 		PF_LOCK();
2844 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2845 		    &io->pfiio_size);
2846 		PF_UNLOCK();
2847 		NET_UNLOCK();
2848 		break;
2849 	}
2850 
2851 	case DIOCSETIFFLAG: {
2852 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2853 
2854 		NET_LOCK();
2855 		PF_LOCK();
2856 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2857 		PF_UNLOCK();
2858 		NET_UNLOCK();
2859 		break;
2860 	}
2861 
2862 	case DIOCCLRIFFLAG: {
2863 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2864 
2865 		NET_LOCK();
2866 		PF_LOCK();
2867 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2868 		PF_UNLOCK();
2869 		NET_UNLOCK();
2870 		break;
2871 	}
2872 
2873 	case DIOCSETREASS: {
2874 		u_int32_t	*reass = (u_int32_t *)addr;
2875 
2876 		NET_LOCK();
2877 		PF_LOCK();
2878 		pf_trans_set.reass = *reass;
2879 		pf_trans_set.mask |= PF_TSET_REASS;
2880 		PF_UNLOCK();
2881 		NET_UNLOCK();
2882 		break;
2883 	}
2884 
2885 	case DIOCSETSYNFLWATS: {
2886 		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2887 
2888 		NET_LOCK();
2889 		PF_LOCK();
2890 		error = pf_syncookies_setwats(io->hiwat, io->lowat);
2891 		PF_UNLOCK();
2892 		NET_UNLOCK();
2893 		break;
2894 	}
2895 
2896 	case DIOCGETSYNFLWATS: {
2897 		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2898 
2899 		NET_LOCK();
2900 		PF_LOCK();
2901 		error = pf_syncookies_getwats(io);
2902 		PF_UNLOCK();
2903 		NET_UNLOCK();
2904 		break;
2905 	}
2906 
2907 	case DIOCSETSYNCOOKIES: {
2908 		u_int8_t	*mode = (u_int8_t *)addr;
2909 
2910 		NET_LOCK();
2911 		PF_LOCK();
2912 		error = pf_syncookies_setmode(*mode);
2913 		PF_UNLOCK();
2914 		NET_UNLOCK();
2915 		break;
2916 	}
2917 
2918 	default:
2919 		error = ENODEV;
2920 		break;
2921 	}
2922 fail:
2923 	return (error);
2924 }
2925 
2926 void
2927 pf_trans_set_commit(void)
2928 {
2929 	if (pf_trans_set.mask & PF_TSET_STATUSIF)
2930 		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2931 	if (pf_trans_set.mask & PF_TSET_DEBUG)
2932 		pf_status.debug = pf_trans_set.debug;
2933 	if (pf_trans_set.mask & PF_TSET_HOSTID)
2934 		pf_status.hostid = pf_trans_set.hostid;
2935 	if (pf_trans_set.mask & PF_TSET_REASS)
2936 		pf_status.reass = pf_trans_set.reass;
2937 }
2938 
2939 void
2940 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2941 {
2942 	memmove(to, from, sizeof(*to));
2943 	to->kif = NULL;
2944 	to->addr.p.tbl = NULL;
2945 }
2946 
2947 int
2948 pf_validate_range(u_int8_t op, u_int16_t port[2])
2949 {
2950 	u_int16_t a = ntohs(port[0]);
2951 	u_int16_t b = ntohs(port[1]);
2952 
2953 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
2954 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
2955 	    (op == PF_OP_XRG && a > b))    /* 34<>22, i.e. all */
2956 		return 1;
2957 	return 0;
2958 }
2959 
/*
 * Copy a rule supplied by userland (`from') into a kernel rule (`to'),
 * sanitizing kernel-only pointers and resolving all symbolic references
 * (interfaces, queues, tags, overload table) into kernel objects.
 *
 * Returns 0 on success, EINVAL on malformed input or unresolvable
 * interface/table references, EBUSY when a routing table, queue or tag
 * cannot be obtained.
 * NOTE(review): on an error return `to' may already hold acquired
 * references (kifs, tags, overload table); presumably the caller
 * releases them via the usual rule teardown path — confirm at call
 * sites before changing the error handling here.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	/* Struct-copy the address specs, then drop userland table ptrs. */
	to->src = from->src;
	to->src.addr.p.tbl = NULL;
	to->dst = from->dst;
	to->dst.addr.p.tbl = NULL;

	/* Reject port ranges that could never be satisfied. */
	if (pf_validate_range(to->src.port_op, to->src.port))
		return (EINVAL);
	if (pf_validate_range(to->dst.port_op, to->dst.port))
		return (EINVAL);

	/* XXX union skip[] */

	/* Bounded copies of all name strings carried by the rule. */
	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* Pools are copied with their kernel pointers cleared. */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	if (pf_validate_range(to->rdr.port_op, to->rdr.proxy_port))
		return (EINVAL);

	/* Resolve interface names to kifs (acquires references). */
	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);
	if (to->overload_tblname[0]) {
		/* Attach (and activate) the overflow table, if named. */
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	/* A requested rtable must actually exist right now. */
	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	/* -1 means "any rdomain"; otherwise it must be a valid table id. */
	to->onrdomain = from->onrdomain;
	if (to->onrdomain != -1 && (to->onrdomain < 0 ||
	    to->onrdomain > RT_TABLEID_MAX))
		return (EINVAL);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
	/* Re-seed the packet-rate threshold from the userland limits. */
	pf_init_threshold(&to->pktrate, from->pktrate.limit,
	    from->pktrate.seconds);

	/* Queue names resolve to qids; a missing queue is EBUSY. */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;	/* default prio queue */
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	/* Tag names resolve to tag ids (reference-counted). */
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->delay = from->delay;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* No pflog interface makes sense when logging is off. */
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert.type = from->divert.type;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}
3100 
3101 int
3102 pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
3103 {
3104 	struct pf_status	pfs;
3105 
3106 	NET_RLOCK_IN_IOCTL();
3107 	PF_LOCK();
3108 	memcpy(&pfs, &pf_status, sizeof(struct pf_status));
3109 	pfi_update_status(pfs.ifname, &pfs);
3110 	PF_UNLOCK();
3111 	NET_RUNLOCK_IN_IOCTL();
3112 
3113 	return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
3114 }
3115