xref: /openbsd-src/sys/net/pf_ioctl.c (revision de8cc8edbc71bd3e3bc7fbffa27ba0e564c37d8b)
1 /*	$OpenBSD: pf_ioctl.c,v 1.363 2021/02/09 23:37:54 patrick Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/mbuf.h>
45 #include <sys/filio.h>
46 #include <sys/fcntl.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/kernel.h>
50 #include <sys/time.h>
51 #include <sys/timeout.h>
52 #include <sys/pool.h>
53 #include <sys/malloc.h>
54 #include <sys/proc.h>
55 #include <sys/rwlock.h>
56 #include <sys/syslog.h>
57 #include <uvm/uvm_extern.h>
58 
59 #include <crypto/md5.h>
60 
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/route.h>
64 #include <net/hfsc.h>
65 #include <net/fq_codel.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/ip.h>
69 #include <netinet/in_pcb.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_icmp.h>
72 #include <netinet/tcp.h>
73 #include <netinet/udp.h>
74 
75 #ifdef INET6
76 #include <netinet/ip6.h>
77 #include <netinet/icmp6.h>
78 #endif /* INET6 */
79 
80 #include <net/pfvar.h>
81 #include <net/pfvar_priv.h>
82 
83 #if NPFSYNC > 0
84 #include <netinet/ip_ipsp.h>
85 #include <net/if_pfsync.h>
86 #endif /* NPFSYNC > 0 */
87 
88 struct pool		 pf_tag_pl;
89 
90 void			 pfattach(int);
91 void			 pf_thread_create(void *);
92 int			 pfopen(dev_t, int, int, struct proc *);
93 int			 pfclose(dev_t, int, int, struct proc *);
94 int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
95 int			 pf_begin_rules(u_int32_t *, const char *);
96 int			 pf_rollback_rules(u_int32_t, char *);
97 void			 pf_remove_queues(void);
98 int			 pf_commit_queues(void);
99 void			 pf_free_queues(struct pf_queuehead *);
100 void			 pf_calc_chksum(struct pf_ruleset *);
101 void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
102 void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
103 int			 pf_commit_rules(u_int32_t, char *);
104 int			 pf_addr_setup(struct pf_ruleset *,
105 			    struct pf_addr_wrap *, sa_family_t);
106 int			 pf_kif_setup(char *, struct pfi_kif **);
107 void			 pf_addr_copyout(struct pf_addr_wrap *);
108 void			 pf_trans_set_commit(void);
109 void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
110 int			 pf_validate_range(u_int8_t, u_int16_t[2]);
111 int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
112 			    struct pf_ruleset *);
113 u_int16_t		 pf_qname2qid(char *, int);
114 void			 pf_qid2qname(u_int16_t, char *);
115 void			 pf_qid_unref(u_int16_t);
116 
/* the implicit "pass all" rule, plus its staged in-transaction replacement */
struct pf_rule		 pf_default_rule, pf_default_rule_new;

/*
 * Global option values staged during an ioctl transaction; `mask'
 * records (as PF_TSET_* bits) which fields were actually set so that
 * only those are applied by pf_trans_set_commit().
 */
struct {
	char		statusif[IFNAMSIZ];
	u_int32_t	debug;
	u_int32_t	hostid;
	u_int32_t	reass;
	u_int32_t	mask;
} pf_trans_set;

#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

/* tag and queue ids are each allocated from the range 1..TAGID_MAX */
#define	TAGID_MAX	 50000
/* sorted lists of allocated tag names (pf_tags) and queue names (pf_qids) */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
135 
136 /*
137  * pf_lock protects consistency of PF data structures, which don't have
138  * their dedicated lock yet. The pf_lock currently protects:
139  *	- rules,
140  *	- radix tables,
141  *	- source nodes
142  * All callers must grab pf_lock exclusively.
143  *
144  * pf_state_lock protects consistency of state table. Packets, which do state
145  * look up grab the lock as readers. If packet must create state, then it must
146  * grab the lock as writer. Whenever packet creates state it grabs pf_lock
147  * first then it locks pf_state_lock as the writer.
148  */
149 struct rwlock		 pf_lock = RWLOCK_INITIALIZER("pf_lock");
150 struct rwlock		 pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
151 
152 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
153 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
154 #endif
155 u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
156 void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
157 void			 tag_unref(struct pf_tags *, u_int16_t);
158 int			 pf_rtlabel_add(struct pf_addr_wrap *);
159 void			 pf_rtlabel_remove(struct pf_addr_wrap *);
160 void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
161 
162 
/*
 * pfattach: one-time initialization of pf at attach time.  Sets up the
 * memory pools, the pf subsystems (queueing, tables, interfaces, OS
 * fingerprinting, syncookies), the implicit default pass rule and the
 * default state timeouts.  `num' is unused.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* pools for all core pf objects, all at IPL_SOFTNET */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
	    IPL_SOFTNET, 0, "pfrule", NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
	    IPL_SOFTNET, 0, "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
	    IPL_SOFTNET, 0, "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
	    IPL_SOFTNET, 0, "pfstate", NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
	    IPL_SOFTNET, 0, "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
	    IPL_SOFTNET, 0, "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
	    IPL_SOFTNET, 0, "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
	    IPL_SOFTNET, 0, "pfqueue", NULL);
	pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
	    IPL_SOFTNET, 0, "pftag", NULL);
	pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
	    IPL_SOFTNET, 0, "pfpktdelay", NULL);

	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();
	pf_syncookies_init();

	/* cap the state pool at the configured state limit */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* use a smaller table-entry limit on machines with <= 100MB RAM */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* queue definitions are double-buffered: one active, one inactive */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type =  PF_ADDR_NONE;
	pf_default_rule.nat.addr.type =  PF_ADDR_NONE;
	pf_default_rule.route.addr.type =  PF_ADDR_NONE;

	pf_normalize_init();
	memset(&pf_status, 0, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
253 
254 int
255 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
256 {
257 	if (minor(dev) >= 1)
258 		return (ENXIO);
259 	return (0);
260 }
261 
262 int
263 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
264 {
265 	if (minor(dev) >= 1)
266 		return (ENXIO);
267 	return (0);
268 }
269 
/*
 * Unlink `rule' from `rulequeue' (when given) and free it once no
 * references remain.  A rule that still holds states or source nodes
 * is only detached here; its resources are released on a later call
 * (with rulequeue == NULL) after those references have drained.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* a cleared tqe_prev marks the rule as unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* defer the actual free while references or list linkage remain */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	if (rulequeue == NULL) {
		/* deferred free path: tables were not removed at unlink time */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_remove_anchor(rule);
	pool_put(&pf_rule_pl, rule);
}
322 
/*
 * Remove a single rule from its active ruleset, renumber the
 * remaining rules, bump the ruleset ticket and recompute the skip
 * steps.  For the main ruleset the ruleset checksum is recalculated
 * as well.  The rule must be attached to a ruleset.
 */
void
pf_purge_rule(struct pf_rule *rule)
{
	u_int32_t		 nr = 0;
	struct pf_ruleset	*ruleset;

	KASSERT((rule != NULL) && (rule->ruleset != NULL));
	ruleset = rule->ruleset;

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	/* renumber the surviving rules to keep rule->nr dense */
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);
	pf_remove_if_empty_ruleset(ruleset);

	if (ruleset == &pf_main_ruleset)
		pf_calc_chksum(ruleset);
}
343 
/*
 * Return the numeric id for `tagname', taking a new reference on it.
 * When the name is unknown and `create' is set, allocate the smallest
 * free id and insert the new entry so the list stays sorted by id.
 * Returns 0 on failure: unknown name without create, id space
 * exhausted, or pool allocation failure.
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	TAILQ_FOREACH(p, head, entries) {
		/* gap found: p->tag skips past the id we expected next */
		if (p->tag != new_tagid)
			break;
		new_tagid = p->tag + 1;
	}

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
390 
391 void
392 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
393 {
394 	struct pf_tagname	*tag;
395 
396 	TAILQ_FOREACH(tag, head, entries)
397 		if (tag->tag == tagid) {
398 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
399 			return;
400 		}
401 }
402 
403 void
404 tag_unref(struct pf_tags *head, u_int16_t tag)
405 {
406 	struct pf_tagname	*p, *next;
407 
408 	if (tag == 0)
409 		return;
410 
411 	TAILQ_FOREACH_SAFE(p, head, entries, next) {
412 		if (tag == p->tag) {
413 			if (--p->ref == 0) {
414 				TAILQ_REMOVE(head, p, entries);
415 				pool_put(&pf_tag_pl, p);
416 			}
417 			break;
418 		}
419 	}
420 }
421 
422 u_int16_t
423 pf_tagname2tag(char *tagname, int create)
424 {
425 	return (tagname2tag(&pf_tags, tagname, create));
426 }
427 
428 void
429 pf_tag2tagname(u_int16_t tagid, char *p)
430 {
431 	tag2tagname(&pf_tags, tagid, p);
432 }
433 
434 void
435 pf_tag_ref(u_int16_t tag)
436 {
437 	struct pf_tagname *t;
438 
439 	TAILQ_FOREACH(t, &pf_tags, entries)
440 		if (t->tag == tag)
441 			break;
442 	if (t != NULL)
443 		t->ref++;
444 }
445 
446 void
447 pf_tag_unref(u_int16_t tag)
448 {
449 	tag_unref(&pf_tags, tag);
450 }
451 
452 int
453 pf_rtlabel_add(struct pf_addr_wrap *a)
454 {
455 	if (a->type == PF_ADDR_RTLABEL &&
456 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
457 		return (-1);
458 	return (0);
459 }
460 
461 void
462 pf_rtlabel_remove(struct pf_addr_wrap *a)
463 {
464 	if (a->type == PF_ADDR_RTLABEL)
465 		rtlabel_unref(a->v.rtlabel);
466 }
467 
468 void
469 pf_rtlabel_copyout(struct pf_addr_wrap *a)
470 {
471 	const char	*name;
472 
473 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
474 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
475 			strlcpy(a->v.rtlabelname, "?",
476 			    sizeof(a->v.rtlabelname));
477 		else
478 			strlcpy(a->v.rtlabelname, name,
479 			    sizeof(a->v.rtlabelname));
480 	}
481 }
482 
483 u_int16_t
484 pf_qname2qid(char *qname, int create)
485 {
486 	return (tagname2tag(&pf_qids, qname, create));
487 }
488 
489 void
490 pf_qid2qname(u_int16_t qid, char *p)
491 {
492 	tag2tagname(&pf_qids, qid, p);
493 }
494 
495 void
496 pf_qid_unref(u_int16_t qid)
497 {
498 	tag_unref(&pf_qids, (u_int16_t)qid);
499 }
500 
501 int
502 pf_begin_rules(u_int32_t *ticket, const char *anchor)
503 {
504 	struct pf_ruleset	*rs;
505 	struct pf_rule		*rule;
506 
507 	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
508 		return (EINVAL);
509 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
510 		pf_rm_rule(rs->rules.inactive.ptr, rule);
511 		rs->rules.inactive.rcount--;
512 	}
513 	*ticket = ++rs->rules.inactive.ticket;
514 	rs->rules.inactive.open = 1;
515 	return (0);
516 }
517 
/*
 * Abort an open rule transaction: flush the inactive rules of
 * `anchor' when `ticket' matches the open transaction, and for the
 * main ruleset also drop any staged queue definitions.  A missing
 * ruleset or stale ticket is silently ignored (returns 0).
 */
int
pf_rollback_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    rs->rules.inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	rs->rules.inactive.open = 0;

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);

	pf_free_queues(pf_queues_inactive);

	return (0);
}
542 
543 void
544 pf_free_queues(struct pf_queuehead *where)
545 {
546 	struct pf_queuespec	*q, *qtmp;
547 
548 	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
549 		TAILQ_REMOVE(where, q, entries);
550 		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
551 		pool_put(&pf_queue_pl, q);
552 	}
553 }
554 
555 void
556 pf_remove_queues(void)
557 {
558 	struct pf_queuespec	*q;
559 	struct ifnet		*ifp;
560 
561 	/* put back interfaces in normal queueing mode */
562 	TAILQ_FOREACH(q, pf_queues_active, entries) {
563 		if (q->parent_qid != 0)
564 			continue;
565 
566 		ifp = q->kif->pfik_ifp;
567 		if (ifp == NULL)
568 			continue;
569 
570 		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
571 	}
572 }
573 
574 struct pf_queue_if {
575 	struct ifnet		*ifp;
576 	const struct ifq_ops	*ifqops;
577 	const struct pfq_ops	*pfqops;
578 	void			*disc;
579 	struct pf_queue_if	*next;
580 };
581 
582 static inline struct pf_queue_if *
583 pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
584 {
585 	struct pf_queue_if *qif = list;
586 
587 	while (qif != NULL) {
588 		if (qif->ifp == ifp)
589 			return (qif);
590 
591 		qif = qif->next;
592 	}
593 
594 	return (qif);
595 }
596 
/*
 * Instantiate the active queue definitions on their interfaces:
 * allocate one traffic-conditioner instance per root interface, add
 * every queue to its interface's conditioner, revert interfaces that
 * had a root queue only in the old (inactive) configuration to priq,
 * and finally attach the new disciplines.  On error all allocated
 * conditioner state is freed and the interfaces are left untouched;
 * returns 0 or the pfq_addqueue() error.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_queue_if		*list = NULL, *qif;
	int			 error;

	/*
	 * Find root queues and allocate traffic conditioner
	 * private data for these interfaces
	 */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
		qif->ifp = ifp;

		/* HFSC for explicit root classes, fq-codel otherwise */
		if (q->flags & PFQS_ROOTCLASS) {
			qif->ifqops = ifq_hfsc_ops;
			qif->pfqops = pfq_hfsc_ops;
		} else {
			qif->ifqops = ifq_fqcodel_ops;
			qif->pfqops = pfq_fqcodel_ops;
		}

		qif->disc = qif->pfqops->pfq_alloc(ifp);

		qif->next = list;
		list = qif;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* the first pass created an entry for every root interface */
		qif = pf_ifp2q(list, ifp);
		KASSERT(qif != NULL);

		error = qif->pfqops->pfq_addqueue(qif->disc, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = pf_ifp2q(list, ifp);
		if (qif != NULL)
			continue;

		/* no root queue in the new config: back to default priq */
		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		ifp = qif->ifp;

		ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (0);

error:
	/* tear down everything allocated so far; interfaces untouched */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		qif->pfqops->pfq_free(qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (error);
}
688 
689 int
690 pf_commit_queues(void)
691 {
692 	struct pf_queuehead	*qswap;
693 	int error;
694 
695         /* swap */
696         qswap = pf_queues_active;
697         pf_queues_active = pf_queues_inactive;
698         pf_queues_inactive = qswap;
699 
700 	error = pf_create_queues();
701 	if (error != 0) {
702 		pf_queues_inactive = pf_queues_active;
703 		pf_queues_active = qswap;
704 		return (error);
705 	}
706 
707         pf_free_queues(pf_queues_inactive);
708 
709 	return (0);
710 }
711 
712 const struct pfq_ops *
713 pf_queue_manager(struct pf_queuespec *q)
714 {
715 	if (q->flags & PFQS_FLOWQUEUE)
716 		return pfq_fqcodel_ops;
717 	return (/* pfq_default_ops */ NULL);
718 }
719 
/*
 * Helpers to feed individual rule fields into an MD5 context when
 * computing the ruleset checksum.  The ctx variable is expected in
 * scope at the expansion site.  Multi-byte integers are converted to
 * network byte order first (via the caller-provided scratch variable
 * `stor') so the digest does not depend on host endianness.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

/* hash a NUL-terminated string field, excluding the terminator */
#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
735 
/*
 * Fold the checksum-relevant fields of a rule address into `ctx'.
 * Only the variant of the address union that is actually in use is
 * hashed, plus the port range and operators.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			/* skip auto-generated optimizer tables */
			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
			    strlen(PF_OPTIMIZER_TABLE_PFX)))
				PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
		case PF_ADDR_RTLABEL:
			PF_MD5_UPD(pfr, addr.v.rtlabelname);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
765 
/*
 * Fold the checksum-relevant fields of a whole rule into `ctx'.
 * `x' and `y' are scratch storage for the byte-order-converting
 * PF_MD5_UPD_HTONS/HTONL macros.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
805 
/*
 * Commit the rule transaction opened under `ticket' for `anchor':
 * swap the inactive ruleset in as the active one, free the previous
 * active rules and recompute skip steps.  For the main ruleset the
 * ruleset checksum is recomputed and queue definitions are committed
 * as well.  Returns EBUSY when the ticket does not match an open
 * transaction, otherwise 0 or the pf_commit_queues() error.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	u_int32_t		 old_rcount;

	/* Make sure any expired rules get removed from active rules first. */
	pf_purge_expired_rules();

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* checksum is computed over the inactive (about-to-commit) rules */
	if (rs == &pf_main_ruleset)
		pf_calc_chksum(rs);

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}
850 
/*
 * Compute the MD5 digest over the inactive rules of `rs' and store it
 * in pf_status.pf_chksum.  An empty ruleset yields the digest of the
 * empty input.
 */
void
pf_calc_chksum(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);

	if (rs->rules.inactive.rcount) {
		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
			pf_hash_rule(&ctx, rule);
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
}
869 
870 int
871 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
872     sa_family_t af)
873 {
874 	if (pfi_dynaddr_setup(addr, af) ||
875 	    pf_tbladdr_setup(ruleset, addr) ||
876 	    pf_rtlabel_add(addr))
877 		return (EINVAL);
878 
879 	return (0);
880 }
881 
882 int
883 pf_kif_setup(char *ifname, struct pfi_kif **kif)
884 {
885 	if (ifname[0]) {
886 		*kif = pfi_kif_get(ifname);
887 		if (*kif == NULL)
888 			return (EINVAL);
889 
890 		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
891 	} else
892 		*kif = NULL;
893 
894 	return (0);
895 }
896 
/*
 * Prepare an address wrapper for copyout to userland by filling in
 * the user-visible representations of its dynamic, table and
 * route-label variants.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
904 
905 int
906 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
907 {
908 	int			 error = 0;
909 
910 	/* XXX keep in sync with switch() below */
911 	if (securelevel > 1)
912 		switch (cmd) {
913 		case DIOCGETRULES:
914 		case DIOCGETRULE:
915 		case DIOCGETSTATE:
916 		case DIOCSETSTATUSIF:
917 		case DIOCGETSTATUS:
918 		case DIOCCLRSTATUS:
919 		case DIOCNATLOOK:
920 		case DIOCSETDEBUG:
921 		case DIOCGETSTATES:
922 		case DIOCGETTIMEOUT:
923 		case DIOCGETLIMIT:
924 		case DIOCGETRULESETS:
925 		case DIOCGETRULESET:
926 		case DIOCGETQUEUES:
927 		case DIOCGETQUEUE:
928 		case DIOCGETQSTATS:
929 		case DIOCRGETTABLES:
930 		case DIOCRGETTSTATS:
931 		case DIOCRCLRTSTATS:
932 		case DIOCRCLRADDRS:
933 		case DIOCRADDADDRS:
934 		case DIOCRDELADDRS:
935 		case DIOCRSETADDRS:
936 		case DIOCRGETADDRS:
937 		case DIOCRGETASTATS:
938 		case DIOCRCLRASTATS:
939 		case DIOCRTSTADDRS:
940 		case DIOCOSFPGET:
941 		case DIOCGETSRCNODES:
942 		case DIOCCLRSRCNODES:
943 		case DIOCIGETIFACES:
944 		case DIOCSETIFFLAG:
945 		case DIOCCLRIFFLAG:
946 		case DIOCGETSYNFLWATS:
947 			break;
948 		case DIOCRCLRTABLES:
949 		case DIOCRADDTABLES:
950 		case DIOCRDELTABLES:
951 		case DIOCRSETTFLAGS:
952 			if (((struct pfioc_table *)addr)->pfrio_flags &
953 			    PFR_FLAG_DUMMY)
954 				break; /* dummy operation ok */
955 			return (EPERM);
956 		default:
957 			return (EPERM);
958 		}
959 
960 	if (!(flags & FWRITE))
961 		switch (cmd) {
962 		case DIOCGETRULES:
963 		case DIOCGETSTATE:
964 		case DIOCGETSTATUS:
965 		case DIOCGETSTATES:
966 		case DIOCGETTIMEOUT:
967 		case DIOCGETLIMIT:
968 		case DIOCGETRULESETS:
969 		case DIOCGETRULESET:
970 		case DIOCGETQUEUES:
971 		case DIOCGETQUEUE:
972 		case DIOCGETQSTATS:
973 		case DIOCNATLOOK:
974 		case DIOCRGETTABLES:
975 		case DIOCRGETTSTATS:
976 		case DIOCRGETADDRS:
977 		case DIOCRGETASTATS:
978 		case DIOCRTSTADDRS:
979 		case DIOCOSFPGET:
980 		case DIOCGETSRCNODES:
981 		case DIOCIGETIFACES:
982 		case DIOCGETSYNFLWATS:
983 			break;
984 		case DIOCRCLRTABLES:
985 		case DIOCRADDTABLES:
986 		case DIOCRDELTABLES:
987 		case DIOCRCLRTSTATS:
988 		case DIOCRCLRADDRS:
989 		case DIOCRADDADDRS:
990 		case DIOCRDELADDRS:
991 		case DIOCRSETADDRS:
992 		case DIOCRSETTFLAGS:
993 			if (((struct pfioc_table *)addr)->pfrio_flags &
994 			    PFR_FLAG_DUMMY) {
995 				flags |= FWRITE; /* need write lock for dummy */
996 				break; /* dummy operation ok */
997 			}
998 			return (EACCES);
999 		case DIOCGETRULE:
1000 			if (((struct pfioc_rule *)addr)->action ==
1001 			    PF_GET_CLR_CNTR)
1002 				return (EACCES);
1003 			break;
1004 		default:
1005 			return (EACCES);
1006 		}
1007 
1008 	switch (cmd) {
1009 
1010 	case DIOCSTART:
1011 		NET_LOCK();
1012 		PF_LOCK();
1013 		if (pf_status.running)
1014 			error = EEXIST;
1015 		else {
1016 			pf_status.running = 1;
1017 			pf_status.since = getuptime();
1018 			if (pf_status.stateid == 0) {
1019 				pf_status.stateid = gettime();
1020 				pf_status.stateid = pf_status.stateid << 32;
1021 			}
1022 			timeout_add_sec(&pf_purge_to, 1);
1023 			pf_create_queues();
1024 			DPFPRINTF(LOG_NOTICE, "pf: started");
1025 		}
1026 		PF_UNLOCK();
1027 		NET_UNLOCK();
1028 		break;
1029 
1030 	case DIOCSTOP:
1031 		NET_LOCK();
1032 		PF_LOCK();
1033 		if (!pf_status.running)
1034 			error = ENOENT;
1035 		else {
1036 			pf_status.running = 0;
1037 			pf_status.since = getuptime();
1038 			pf_remove_queues();
1039 			DPFPRINTF(LOG_NOTICE, "pf: stopped");
1040 		}
1041 		PF_UNLOCK();
1042 		NET_UNLOCK();
1043 		break;
1044 
1045 	case DIOCGETQUEUES: {
1046 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1047 		struct pf_queuespec	*qs;
1048 		u_int32_t		 nr = 0;
1049 
1050 		NET_LOCK();
1051 		PF_LOCK();
1052 		pq->ticket = pf_main_ruleset.rules.active.ticket;
1053 
1054 		/* save state to not run over them all each time? */
1055 		qs = TAILQ_FIRST(pf_queues_active);
1056 		while (qs != NULL) {
1057 			qs = TAILQ_NEXT(qs, entries);
1058 			nr++;
1059 		}
1060 		pq->nr = nr;
1061 		PF_UNLOCK();
1062 		NET_UNLOCK();
1063 		break;
1064 	}
1065 
1066 	case DIOCGETQUEUE: {
1067 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1068 		struct pf_queuespec	*qs;
1069 		u_int32_t		 nr = 0;
1070 
1071 		NET_LOCK();
1072 		PF_LOCK();
1073 		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1074 			error = EBUSY;
1075 			PF_UNLOCK();
1076 			NET_UNLOCK();
1077 			break;
1078 		}
1079 
1080 		/* save state to not run over them all each time? */
1081 		qs = TAILQ_FIRST(pf_queues_active);
1082 		while ((qs != NULL) && (nr++ < pq->nr))
1083 			qs = TAILQ_NEXT(qs, entries);
1084 		if (qs == NULL) {
1085 			error = EBUSY;
1086 			PF_UNLOCK();
1087 			NET_UNLOCK();
1088 			break;
1089 		}
1090 		memcpy(&pq->queue, qs, sizeof(pq->queue));
1091 		PF_UNLOCK();
1092 		NET_UNLOCK();
1093 		break;
1094 	}
1095 
	/*
	 * DIOCGETQSTATS: copy out the pq->nr'th queue spec together with its
	 * scheduler statistics.  The stats callback used depends on whether
	 * the queue is backed by fq-codel or by HFSC.
	 */
	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr;
		int			 nbytes;

		NET_LOCK();
		PF_LOCK();
		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
			/* enumeration ticket is stale */
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;

		/* save state to not run over them all each time? */
		qs = TAILQ_FIRST(pf_queues_active);
		while ((qs != NULL) && (nr++ < pq->nr))
			qs = TAILQ_NEXT(qs, entries);
		if (qs == NULL) {
			/* index beyond the end of the list */
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pq->queue, qs, sizeof(pq->queue));
		/* It's a root flow queue but is not an HFSC root class */
		if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 &&
		    !(qs->flags & PFQS_ROOTCLASS))
			error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		else
			error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		if (error == 0)
			/* report how many bytes of stats were written */
			pq->nbytes = nbytes;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1138 
	/*
	 * DIOCADDQUEUE: append a queue spec to the inactive queue list as
	 * part of an open transaction (ticket must match the inactive
	 * ruleset).  The pool allocation is done before taking the locks
	 * since pool_get() may sleep.
	 */
	case DIOCADDQUEUE: {
		struct pfioc_queue	*q = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;

		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (qs == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
			/* no (or a different) transaction is open */
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		memcpy(qs, &q->queue, sizeof(*qs));
		/* allocate (or look up) a queue id for the name */
		qs->qid = pf_qname2qid(qs->qname, 1);
		if (qs->qid == 0) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		/* a named parent must already have a qid */
		if (qs->parent[0] && (qs->parent_qid =
		    pf_qname2qid(qs->parent, 0)) == 0) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		qs->kif = pfi_kif_get(qs->ifname);
		if (qs->kif == NULL) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		/* XXX resolve bw percentage specs */
		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);

		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
		PF_UNLOCK();
		NET_UNLOCK();

		break;
	}
1192 
	/*
	 * DIOCADDRULE: append a rule to the inactive ruleset of the given
	 * anchor as part of an open transaction.  The rule is validated
	 * (address family, address/anchor setup, route-to direction,
	 * priorities) before being linked in; any failure tears the rule
	 * down again with pf_rm_rule().
	 */
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;

		/* allocate before locking; pool_get() may sleep */
		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		/* force NUL-termination of the user-supplied anchor path */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		/* high byte of return_icmp is the ICMP type */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		if (pr->ticket != ruleset->rules.inactive.ticket) {
			/* no (or a different) transaction is open */
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
			/* copyin may have partially initialized the rule */
			pf_rm_rule(NULL, rule);
			rule = NULL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		rule->cuid = p->p_ucred->cr_ruid;
		rule->cpid = p->p_p->ps_pid;

		/* only unspecified, INET and (optionally) INET6 are valid */
		switch (rule->af) {
		case 0:
			break;
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			pf_rm_rule(NULL, rule);
			rule = NULL;
			error = EAFNOSUPPORT;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* number the rule after the current tail of the list */
		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

		if (rule->src.addr.type == PF_ADDR_NONE ||
		    rule->dst.addr.type == PF_ADDR_NONE)
			error = EINVAL;

		/* resolve dynamic addresses and tables for all pools */
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		/* route-to only makes sense with an explicit direction */
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > IFQ_MAXPRIO ||
		    rule->set_prio[1] > IFQ_MAXPRIO))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
		    rule, entries);
		rule->ruleset = ruleset;
		ruleset->rules.inactive.rcount++;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1300 
	/*
	 * DIOCGETRULES: report the number of rules in the active ruleset of
	 * an anchor and hand out a ticket for subsequent DIOCGETRULE calls.
	 */
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;

		NET_LOCK();
		PF_LOCK();
		/* force NUL-termination of the user-supplied anchor path */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* rule count == number of the last rule + 1 */
		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules.active.ticket;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1326 
	/*
	 * DIOCGETRULE: copy out the pr->nr'th rule of an anchor's active
	 * ruleset.  Kernel-only pointers are scrubbed from the copy before
	 * it goes to userland.  PF_GET_CLR_CNTR additionally resets the
	 * rule's counters.
	 */
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 i;

		NET_LOCK();
		PF_LOCK();
		/* force NUL-termination of the user-supplied anchor path */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		if (pr->ticket != ruleset->rules.active.ticket) {
			/* ruleset changed since DIOCGETRULES */
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* linear scan for the rule with the requested number */
		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pr->rule, rule, sizeof(struct pf_rule));
		/* hide kernel pointers and list linkage from userland */
		memset(&pr->rule.entries, 0, sizeof(pr->rule.entries));
		pr->rule.kif = NULL;
		pr->rule.nat.kif = NULL;
		pr->rule.rdr.kif = NULL;
		pr->rule.route.kif = NULL;
		pr->rule.rcv_kif = NULL;
		pr->rule.anchor = NULL;
		pr->rule.overload_tbl = NULL;
		/* convert internal fixed-point rate back to user units */
		pr->rule.pktrate.limit /= PF_THRESHOLD_MULT;
		memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle));
		pr->rule.ruleset = NULL;
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		pf_addr_copyout(&pr->rule.rdr.addr);
		pf_addr_copyout(&pr->rule.nat.addr);
		pf_addr_copyout(&pr->rule.route.addr);
		/* translate skip-step pointers into rule numbers */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = (u_int32_t)-1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			rule->states_tot = 0;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1398 
1399 	case DIOCCHANGERULE: {
1400 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1401 		struct pf_ruleset	*ruleset;
1402 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1403 		u_int32_t		 nr = 0;
1404 
1405 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1406 		    pcr->action > PF_CHANGE_GET_TICKET) {
1407 			error = EINVAL;
1408 			break;
1409 		}
1410 
1411 		newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1412 		if (newrule == NULL) {
1413 			error = ENOMEM;
1414 			break;
1415 		}
1416 
1417 		NET_LOCK();
1418 		PF_LOCK();
1419 		ruleset = pf_find_ruleset(pcr->anchor);
1420 		if (ruleset == NULL) {
1421 			error = EINVAL;
1422 			PF_UNLOCK();
1423 			NET_UNLOCK();
1424 			pool_put(&pf_rule_pl, newrule);
1425 			break;
1426 		}
1427 
1428 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1429 			pcr->ticket = ++ruleset->rules.active.ticket;
1430 			PF_UNLOCK();
1431 			NET_UNLOCK();
1432 			pool_put(&pf_rule_pl, newrule);
1433 			break;
1434 		} else {
1435 			if (pcr->ticket !=
1436 			    ruleset->rules.active.ticket) {
1437 				error = EINVAL;
1438 				PF_UNLOCK();
1439 				NET_UNLOCK();
1440 				pool_put(&pf_rule_pl, newrule);
1441 				break;
1442 			}
1443 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1444 				error = EINVAL;
1445 				PF_UNLOCK();
1446 				NET_UNLOCK();
1447 				pool_put(&pf_rule_pl, newrule);
1448 				break;
1449 			}
1450 		}
1451 
1452 		if (pcr->action != PF_CHANGE_REMOVE) {
1453 			pf_rule_copyin(&pcr->rule, newrule, ruleset);
1454 			newrule->cuid = p->p_ucred->cr_ruid;
1455 			newrule->cpid = p->p_p->ps_pid;
1456 
1457 			switch (newrule->af) {
1458 			case 0:
1459 				break;
1460 			case AF_INET:
1461 				break;
1462 #ifdef INET6
1463 			case AF_INET6:
1464 				break;
1465 #endif /* INET6 */
1466 			default:
1467 				pf_rm_rule(NULL, newrule);
1468 				error = EAFNOSUPPORT;
1469 				PF_UNLOCK();
1470 				NET_UNLOCK();
1471 				goto fail;
1472 			}
1473 
1474 			if (newrule->rt && !newrule->direction)
1475 				error = EINVAL;
1476 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1477 				error = EINVAL;
1478 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1479 				error = EINVAL;
1480 			if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
1481 				error = EINVAL;
1482 			if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
1483 				error = EINVAL;
1484 			if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
1485 				error = EINVAL;
1486 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1487 				error = EINVAL;
1488 
1489 			if (error) {
1490 				pf_rm_rule(NULL, newrule);
1491 				PF_UNLOCK();
1492 				NET_UNLOCK();
1493 				break;
1494 			}
1495 		}
1496 
1497 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1498 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1499 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1500 			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1501 			    pf_rulequeue);
1502 		else {
1503 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1504 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1505 				oldrule = TAILQ_NEXT(oldrule, entries);
1506 			if (oldrule == NULL) {
1507 				if (newrule != NULL)
1508 					pf_rm_rule(NULL, newrule);
1509 				error = EINVAL;
1510 				PF_UNLOCK();
1511 				NET_UNLOCK();
1512 				break;
1513 			}
1514 		}
1515 
1516 		if (pcr->action == PF_CHANGE_REMOVE) {
1517 			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1518 			ruleset->rules.active.rcount--;
1519 		} else {
1520 			if (oldrule == NULL)
1521 				TAILQ_INSERT_TAIL(
1522 				    ruleset->rules.active.ptr,
1523 				    newrule, entries);
1524 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1525 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1526 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1527 			else
1528 				TAILQ_INSERT_AFTER(
1529 				    ruleset->rules.active.ptr,
1530 				    oldrule, newrule, entries);
1531 			ruleset->rules.active.rcount++;
1532 		}
1533 
1534 		nr = 0;
1535 		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1536 			oldrule->nr = nr++;
1537 
1538 		ruleset->rules.active.ticket++;
1539 
1540 		pf_calc_skip_steps(ruleset->rules.active.ptr);
1541 		pf_remove_if_empty_ruleset(ruleset);
1542 
1543 		PF_UNLOCK();
1544 		NET_UNLOCK();
1545 		break;
1546 	}
1547 
	/*
	 * DIOCCLRSTATES: remove all states, optionally restricted to states
	 * on a named interface.  Individual pfsync delete messages are
	 * suppressed; a single clear-states message is sent afterwards.
	 */
	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 killed = 0;

		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		/* fetch the successor first: pf_remove_state() unlinks s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			/* empty ifname matches every interface */
			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC > 0
				/* don't send out individual delete messages */
				SET(s->state_flags, PFSTATE_NOSYNC);
#endif	/* NPFSYNC > 0 */
				pf_remove_state(s);
				killed++;
			}
		}
		PF_STATE_EXIT_WRITE();
		psk->psk_killed = killed;
#if NPFSYNC > 0
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif	/* NPFSYNC > 0 */
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1578 
	/*
	 * DIOCKILLSTATES: remove states matching the given criteria.  Three
	 * strategies, tried in order:
	 *   1. kill by exact state id/creator id,
	 *   2. exact key lookup in both directions (af + proto + both
	 *      ports given with PF_OP_EQ),
	 *   3. full linear walk of the state tree matching address masks,
	 *      port operators, label and interface.
	 */
	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_item	*si, *sit;
		struct pf_state_key	*sk, key;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;
		const int 		 dirs[] = { PF_IN, PF_OUT };
		int			 sidx, didx;

		/* strategy 1: kill a single state by its id */
		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = pf_status.hostid;
			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
				pf_remove_state(s);
				psk->psk_killed = 1;
			}
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* strategy 2: exact key lookup, both directions */
		if (psk->psk_af && psk->psk_proto &&
		    psk->psk_src.port_op == PF_OP_EQ &&
		    psk->psk_dst.port_op == PF_OP_EQ) {

			key.af = psk->psk_af;
			key.proto = psk->psk_proto;
			key.rdomain = psk->psk_rdomain;

			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			for (i = 0; i < nitems(dirs); i++) {
				/* src/dst swap depending on direction */
				if (dirs[i] == PF_IN) {
					sidx = 0;
					didx = 1;
				} else {
					sidx = 1;
					didx = 0;
				}
				pf_addrcpy(&key.addr[sidx],
				    &psk->psk_src.addr.v.a.addr, key.af);
				pf_addrcpy(&key.addr[didx],
				    &psk->psk_dst.addr.v.a.addr, key.af);
				key.port[sidx] = psk->psk_src.port[0];
				key.port[didx] = psk->psk_dst.port[0];

				sk = RB_FIND(pf_state_tree, &pf_statetbl, &key);
				if (sk == NULL)
					continue;

				/*
				 * Match the key against the right side of
				 * each state; af-translated states (wire af
				 * != stack af) are matched on PF_IN only.
				 */
				TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit)
					if (((si->s->key[PF_SK_WIRE]->af ==
					    si->s->key[PF_SK_STACK]->af &&
					    sk == (dirs[i] == PF_IN ?
					    si->s->key[PF_SK_WIRE] :
					    si->s->key[PF_SK_STACK])) ||
					    (si->s->key[PF_SK_WIRE]->af !=
					    si->s->key[PF_SK_STACK]->af &&
					    dirs[i] == PF_IN &&
					    (sk == si->s->key[PF_SK_STACK] ||
					    sk == si->s->key[PF_SK_WIRE]))) &&
					    (!psk->psk_ifname[0] ||
					    (si->s->kif != pfi_all &&
					    !strcmp(psk->psk_ifname,
					    si->s->kif->pfik_name)))) {
						pf_remove_state(si->s);
						killed++;
					}
			}
			if (killed)
				psk->psk_killed = killed;
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* strategy 3: full walk of the state tree */
		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			/* pick the key/side matching the state direction */
			if (s->direction == PF_OUT) {
				sk = s->key[PF_SK_STACK];
				srcaddr = &sk->addr[1];
				dstaddr = &sk->addr[0];
				srcport = sk->port[1];
				dstport = sk->port[0];
			} else {
				sk = s->key[PF_SK_WIRE];
				srcaddr = &sk->addr[0];
				dstaddr = &sk->addr[1];
				srcport = sk->port[0];
				dstport = sk->port[1];
			}
			/* unset criteria (af 0, proto 0, ...) match all */
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
			    pf_match_addr(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    srcaddr, sk->af) &&
			    pf_match_addr(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    dstaddr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    srcport)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dstport)) &&
			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
				pf_remove_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
		PF_STATE_EXIT_WRITE();
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1716 
#if NPFSYNC > 0
	/*
	 * DIOCADDSTATE: inject a state from userland via the pfsync state
	 * import path.  Only compiled in when pfsync is configured.
	 */
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		/* reject timeout values outside the known range */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
#endif	/* NPFSYNC > 0 */
1734 
	/*
	 * DIOCGETSTATE: look up a single state by id/creator id and export
	 * it to userland.  A reference keeps the state alive after the
	 * state lock is dropped, so the export can run unlocked.
	 */
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		memset(&id_key, 0, sizeof(id_key));
		id_key.id = ps->state.id;
		id_key.creatorid = ps->state.creatorid;

		NET_LOCK();
		PF_STATE_ENTER_READ();
		s = pf_find_state_byid(&id_key);
		/* take a reference before dropping the state lock */
		s = pf_state_ref(s);
		PF_STATE_EXIT_READ();
		NET_UNLOCK();
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s);
		pf_state_unref(s);
		break;
	}
1759 
	/*
	 * DIOCGETSTATES: copy out all states.  With ps_len == 0 only the
	 * required buffer size is reported.  States are exported one at a
	 * time through a bounce buffer so copyout() never runs on live
	 * state memory.
	 */
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			/* size probe: tell userland how much space it needs */
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		/* bounce buffer, allocated before taking any locks */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		NET_LOCK();
		PF_STATE_ENTER_READ();
		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already unlinked but not yet freed */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop when the user buffer is full */
				if ((nr+1) * sizeof(*p) > ps->ps_len)
					break;
				pf_state_export(pstore, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP, sizeof(*pstore));
					PF_STATE_EXIT_READ();
					NET_UNLOCK();
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}
		PF_STATE_EXIT_READ();
		NET_UNLOCK();

		/* report how many bytes were actually written */
		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP, sizeof(*pstore));
		break;
	}
1804 
	/*
	 * DIOCGETSTATUS: snapshot the global pf status including the
	 * counters of the status interface, if one is configured.
	 */
	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		NET_LOCK();
		PF_LOCK();
		memcpy(s, &pf_status, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * DIOCSETSTATUSIF: set (or, with an empty name, clear) the status
	 * interface.  Setting goes through the transaction set and only
	 * takes effect on commit; clearing is immediate.
	 */
	case DIOCSETSTATUSIF: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		if (pi->pfiio_name[0] == 0) {
			memset(pf_status.ifname, 0, IFNAMSIZ);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
		pf_trans_set.mask |= PF_TSET_STATUSIF;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * DIOCCLRSTATUS: reset status counters — either those of a single
	 * named interface, or the global pf counters.
	 */
	case DIOCCLRSTATUS: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		/* if ifname is specified, clear counters there only */
		if (pi->pfiio_name[0]) {
			pfi_update_status(pi->pfiio_name, NULL);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		memset(pf_status.counters, 0, sizeof(pf_status.counters));
		memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
		memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
		/* restart the "running since" clock */
		pf_status.since = getuptime();

		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1856 
	/*
	 * DIOCNATLOOK: given one side of a translated connection, look up
	 * the state and return the addresses/ports on the other side of
	 * the translation (e.g. for ftp-proxy).
	 */
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		switch (pnl->af) {
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			error = EAFNOSUPPORT;
			goto fail;
		}

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/* TCP/UDP lookups additionally require both ports */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)) ||
		    pnl->rdomain > RT_TABLEID_MAX)
			error = EINVAL;
		else {
			key.af = pnl->af;
			key.proto = pnl->proto;
			key.rdomain = pnl->rdomain;
			pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			NET_LOCK();
			PF_STATE_ENTER_READ();
			state = pf_find_state_all(&key, direction, &m);
			/* hold a reference across the unlock */
			state = pf_state_ref(state);
			PF_STATE_EXIT_READ();
			NET_UNLOCK();

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* report the translated side of the state */
				sk = state->key[sidx];
				pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx],
				    sk->af);
				pnl->rsport = sk->port[sidx];
				pf_addrcpy(&pnl->rdaddr, &sk->addr[didx],
				    sk->af);
				pnl->rdport = sk->port[didx];
				pnl->rrdomain = sk->rdomain;
			} else
				error = ENOENT;
			pf_state_unref(state);
		}
		break;
	}
1922 
	/*
	 * DIOCSETTIMEOUT: stage a new timeout value in the pending default
	 * rule (takes effect on transaction commit) and report the value
	 * currently active.
	 */
	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		/* the purge interval must never be zero */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
		/* return the currently active value, not the staged one */
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* DIOCGETTIMEOUT: report the currently active timeout value. */
	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* DIOCGETLIMIT: report the currently active hard limit. */
	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1971 
	/*
	 * DIOCSETLIMIT: stage a new pool hard limit (applied on transaction
	 * commit) and report the currently active limit.  Rejects limits
	 * below the current pool usage.
	 */
	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		NET_LOCK();
		PF_LOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* refuse a limit below what is currently allocated */
		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
		    pl->limit) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* Fragments reference mbuf clusters. */
		if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		pf_pool_limits[pl->index].limit_new = pl->limit;
		/* return the currently active limit, not the staged one */
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * DIOCSETDEBUG: stage a new debug level in the transaction set;
	 * takes effect on commit.
	 */
	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		NET_LOCK();
		PF_LOCK();
		pf_trans_set.debug = *level;
		pf_trans_set.mask |= PF_TSET_DEBUG;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2017 
	/*
	 * DIOCGETRULESETS: count the child anchors directly below the given
	 * anchor path (the main ruleset has no anchor struct, hence the
	 * special case over all parentless anchors).
	 */
	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		NET_LOCK();
		PF_LOCK();
		/* force NUL-termination of the user-supplied path */
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pr->nr = 0;
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * DIOCGETRULESET: return the name of the pr->nr'th child anchor
	 * below the given anchor path.  An empty result name means the
	 * index was out of range.
	 */
	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		/* force NUL-termination of the user-supplied path */
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pr->name[0] = '\0';
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		PF_UNLOCK();
		NET_UNLOCK();
		/* no name found: index beyond the number of children */
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
2087 
	/*
	 * The DIOCR* ioctls below all delegate to pf_table.c (pfr_*) and
	 * share one shape: validate that the userland element size matches
	 * the kernel's idea of the element type (guards against ABI
	 * mismatch), take the locks, call the pfr_ handler with
	 * PFR_FLAG_USERIOCTL set to mark the request as coming from
	 * userland, and drop the locks again.
	 */

	/* clear (delete) tables matching a filter */
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* create tables from a userland array of pfr_table */
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* delete the named tables */
	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* list tables into a userland buffer */
	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* list tables together with their statistics */
	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* zero the statistics of the named tables */
	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* set/clear flags (e.g. const, persist) on the named tables */
	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* remove all addresses from one table */
	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* add addresses to one table */
	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* delete addresses from one table */
	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* replace a table's address list wholesale */
	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* list the addresses of one table */
	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* list the addresses of one table with per-address statistics */
	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* zero per-address statistics for the given addresses */
	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2317 
2318 	case DIOCRTSTADDRS: {
2319 		struct pfioc_table *io = (struct pfioc_table *)addr;
2320 
2321 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2322 			error = ENODEV;
2323 			break;
2324 		}
2325 		NET_LOCK();
2326 		PF_LOCK();
2327 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2328 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2329 		    PFR_FLAG_USERIOCTL);
2330 		PF_UNLOCK();
2331 		NET_UNLOCK();
2332 		break;
2333 	}
2334 
2335 	case DIOCRINADEFINE: {
2336 		struct pfioc_table *io = (struct pfioc_table *)addr;
2337 
2338 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2339 			error = ENODEV;
2340 			break;
2341 		}
2342 		NET_LOCK();
2343 		PF_LOCK();
2344 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2345 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2346 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2347 		PF_UNLOCK();
2348 		NET_UNLOCK();
2349 		break;
2350 	}
2351 
2352 	case DIOCOSFPADD: {
2353 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2354 		error = pf_osfp_add(io);
2355 		break;
2356 	}
2357 
2358 	case DIOCOSFPGET: {
2359 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2360 		error = pf_osfp_get(io);
2361 		break;
2362 	}
2363 
2364 	case DIOCXBEGIN: {
2365 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2366 		struct pfioc_trans_e	*ioe;
2367 		struct pfr_table	*table;
2368 		int			 i;
2369 
2370 		if (io->esize != sizeof(*ioe)) {
2371 			error = ENODEV;
2372 			goto fail;
2373 		}
2374 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2375 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2376 		NET_LOCK();
2377 		PF_LOCK();
2378 		pf_default_rule_new = pf_default_rule;
2379 		memset(&pf_trans_set, 0, sizeof(pf_trans_set));
2380 		for (i = 0; i < io->size; i++) {
2381 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2382 				PF_UNLOCK();
2383 				NET_UNLOCK();
2384 				free(table, M_TEMP, sizeof(*table));
2385 				free(ioe, M_TEMP, sizeof(*ioe));
2386 				error = EFAULT;
2387 				goto fail;
2388 			}
2389 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2390 			    sizeof(ioe->anchor)) {
2391 				PF_UNLOCK();
2392 				NET_UNLOCK();
2393 				free(table, M_TEMP, sizeof(*table));
2394 				free(ioe, M_TEMP, sizeof(*ioe));
2395 				error = ENAMETOOLONG;
2396 				goto fail;
2397 			}
2398 			switch (ioe->type) {
2399 			case PF_TRANS_TABLE:
2400 				memset(table, 0, sizeof(*table));
2401 				strlcpy(table->pfrt_anchor, ioe->anchor,
2402 				    sizeof(table->pfrt_anchor));
2403 				if ((error = pfr_ina_begin(table,
2404 				    &ioe->ticket, NULL, 0))) {
2405 					PF_UNLOCK();
2406 					NET_UNLOCK();
2407 					free(table, M_TEMP, sizeof(*table));
2408 					free(ioe, M_TEMP, sizeof(*ioe));
2409 					goto fail;
2410 				}
2411 				break;
2412 			case PF_TRANS_RULESET:
2413 				if ((error = pf_begin_rules(&ioe->ticket,
2414 				    ioe->anchor))) {
2415 					PF_UNLOCK();
2416 					NET_UNLOCK();
2417 					free(table, M_TEMP, sizeof(*table));
2418 					free(ioe, M_TEMP, sizeof(*ioe));
2419 					goto fail;
2420 				}
2421 				break;
2422 			default:
2423 				PF_UNLOCK();
2424 				NET_UNLOCK();
2425 				free(table, M_TEMP, sizeof(*table));
2426 				free(ioe, M_TEMP, sizeof(*ioe));
2427 				error = EINVAL;
2428 				goto fail;
2429 			}
2430 			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2431 				PF_UNLOCK();
2432 				NET_UNLOCK();
2433 				free(table, M_TEMP, sizeof(*table));
2434 				free(ioe, M_TEMP, sizeof(*ioe));
2435 				error = EFAULT;
2436 				goto fail;
2437 			}
2438 		}
2439 		PF_UNLOCK();
2440 		NET_UNLOCK();
2441 		free(table, M_TEMP, sizeof(*table));
2442 		free(ioe, M_TEMP, sizeof(*ioe));
2443 		break;
2444 	}
2445 
2446 	case DIOCXROLLBACK: {
2447 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2448 		struct pfioc_trans_e	*ioe;
2449 		struct pfr_table	*table;
2450 		int			 i;
2451 
2452 		if (io->esize != sizeof(*ioe)) {
2453 			error = ENODEV;
2454 			goto fail;
2455 		}
2456 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2457 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2458 		NET_LOCK();
2459 		PF_LOCK();
2460 		for (i = 0; i < io->size; i++) {
2461 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2462 				PF_UNLOCK();
2463 				NET_UNLOCK();
2464 				free(table, M_TEMP, sizeof(*table));
2465 				free(ioe, M_TEMP, sizeof(*ioe));
2466 				error = EFAULT;
2467 				goto fail;
2468 			}
2469 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2470 			    sizeof(ioe->anchor)) {
2471 				PF_UNLOCK();
2472 				NET_UNLOCK();
2473 				free(table, M_TEMP, sizeof(*table));
2474 				free(ioe, M_TEMP, sizeof(*ioe));
2475 				error = ENAMETOOLONG;
2476 				goto fail;
2477 			}
2478 			switch (ioe->type) {
2479 			case PF_TRANS_TABLE:
2480 				memset(table, 0, sizeof(*table));
2481 				strlcpy(table->pfrt_anchor, ioe->anchor,
2482 				    sizeof(table->pfrt_anchor));
2483 				if ((error = pfr_ina_rollback(table,
2484 				    ioe->ticket, NULL, 0))) {
2485 					PF_UNLOCK();
2486 					NET_UNLOCK();
2487 					free(table, M_TEMP, sizeof(*table));
2488 					free(ioe, M_TEMP, sizeof(*ioe));
2489 					goto fail; /* really bad */
2490 				}
2491 				break;
2492 			case PF_TRANS_RULESET:
2493 				if ((error = pf_rollback_rules(ioe->ticket,
2494 				    ioe->anchor))) {
2495 					PF_UNLOCK();
2496 					NET_UNLOCK();
2497 					free(table, M_TEMP, sizeof(*table));
2498 					free(ioe, M_TEMP, sizeof(*ioe));
2499 					goto fail; /* really bad */
2500 				}
2501 				break;
2502 			default:
2503 				PF_UNLOCK();
2504 				NET_UNLOCK();
2505 				free(table, M_TEMP, sizeof(*table));
2506 				free(ioe, M_TEMP, sizeof(*ioe));
2507 				error = EINVAL;
2508 				goto fail; /* really bad */
2509 			}
2510 		}
2511 		PF_UNLOCK();
2512 		NET_UNLOCK();
2513 		free(table, M_TEMP, sizeof(*table));
2514 		free(ioe, M_TEMP, sizeof(*ioe));
2515 		break;
2516 	}
2517 
2518 	case DIOCXCOMMIT: {
2519 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2520 		struct pfioc_trans_e	*ioe;
2521 		struct pfr_table	*table;
2522 		struct pf_ruleset	*rs;
2523 		int			 i;
2524 
2525 		if (io->esize != sizeof(*ioe)) {
2526 			error = ENODEV;
2527 			goto fail;
2528 		}
2529 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2530 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2531 		NET_LOCK();
2532 		PF_LOCK();
2533 		/* first makes sure everything will succeed */
2534 		for (i = 0; i < io->size; i++) {
2535 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2536 				PF_UNLOCK();
2537 				NET_UNLOCK();
2538 				free(table, M_TEMP, sizeof(*table));
2539 				free(ioe, M_TEMP, sizeof(*ioe));
2540 				error = EFAULT;
2541 				goto fail;
2542 			}
2543 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2544 			    sizeof(ioe->anchor)) {
2545 				PF_UNLOCK();
2546 				NET_UNLOCK();
2547 				free(table, M_TEMP, sizeof(*table));
2548 				free(ioe, M_TEMP, sizeof(*ioe));
2549 				error = ENAMETOOLONG;
2550 				goto fail;
2551 			}
2552 			switch (ioe->type) {
2553 			case PF_TRANS_TABLE:
2554 				rs = pf_find_ruleset(ioe->anchor);
2555 				if (rs == NULL || !rs->topen || ioe->ticket !=
2556 				     rs->tticket) {
2557 					PF_UNLOCK();
2558 					NET_UNLOCK();
2559 					free(table, M_TEMP, sizeof(*table));
2560 					free(ioe, M_TEMP, sizeof(*ioe));
2561 					error = EBUSY;
2562 					goto fail;
2563 				}
2564 				break;
2565 			case PF_TRANS_RULESET:
2566 				rs = pf_find_ruleset(ioe->anchor);
2567 				if (rs == NULL ||
2568 				    !rs->rules.inactive.open ||
2569 				    rs->rules.inactive.ticket !=
2570 				    ioe->ticket) {
2571 					PF_UNLOCK();
2572 					NET_UNLOCK();
2573 					free(table, M_TEMP, sizeof(*table));
2574 					free(ioe, M_TEMP, sizeof(*ioe));
2575 					error = EBUSY;
2576 					goto fail;
2577 				}
2578 				break;
2579 			default:
2580 				PF_UNLOCK();
2581 				NET_UNLOCK();
2582 				free(table, M_TEMP, sizeof(*table));
2583 				free(ioe, M_TEMP, sizeof(*ioe));
2584 				error = EINVAL;
2585 				goto fail;
2586 			}
2587 		}
2588 
2589 		/*
2590 		 * Checked already in DIOCSETLIMIT, but check again as the
2591 		 * situation might have changed.
2592 		 */
2593 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2594 			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2595 			    pf_pool_limits[i].limit_new) {
2596 				PF_UNLOCK();
2597 				NET_UNLOCK();
2598 				free(table, M_TEMP, sizeof(*table));
2599 				free(ioe, M_TEMP, sizeof(*ioe));
2600 				error = EBUSY;
2601 				goto fail;
2602 			}
2603 		}
2604 		/* now do the commit - no errors should happen here */
2605 		for (i = 0; i < io->size; i++) {
2606 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2607 				PF_UNLOCK();
2608 				NET_UNLOCK();
2609 				free(table, M_TEMP, sizeof(*table));
2610 				free(ioe, M_TEMP, sizeof(*ioe));
2611 				error = EFAULT;
2612 				goto fail;
2613 			}
2614 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2615 			    sizeof(ioe->anchor)) {
2616 				PF_UNLOCK();
2617 				NET_UNLOCK();
2618 				free(table, M_TEMP, sizeof(*table));
2619 				free(ioe, M_TEMP, sizeof(*ioe));
2620 				error = ENAMETOOLONG;
2621 				goto fail;
2622 			}
2623 			switch (ioe->type) {
2624 			case PF_TRANS_TABLE:
2625 				memset(table, 0, sizeof(*table));
2626 				strlcpy(table->pfrt_anchor, ioe->anchor,
2627 				    sizeof(table->pfrt_anchor));
2628 				if ((error = pfr_ina_commit(table, ioe->ticket,
2629 				    NULL, NULL, 0))) {
2630 					PF_UNLOCK();
2631 					NET_UNLOCK();
2632 					free(table, M_TEMP, sizeof(*table));
2633 					free(ioe, M_TEMP, sizeof(*ioe));
2634 					goto fail; /* really bad */
2635 				}
2636 				break;
2637 			case PF_TRANS_RULESET:
2638 				if ((error = pf_commit_rules(ioe->ticket,
2639 				    ioe->anchor))) {
2640 					PF_UNLOCK();
2641 					NET_UNLOCK();
2642 					free(table, M_TEMP, sizeof(*table));
2643 					free(ioe, M_TEMP, sizeof(*ioe));
2644 					goto fail; /* really bad */
2645 				}
2646 				break;
2647 			default:
2648 				PF_UNLOCK();
2649 				NET_UNLOCK();
2650 				free(table, M_TEMP, sizeof(*table));
2651 				free(ioe, M_TEMP, sizeof(*ioe));
2652 				error = EINVAL;
2653 				goto fail; /* really bad */
2654 			}
2655 		}
2656 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2657 			if (pf_pool_limits[i].limit_new !=
2658 			    pf_pool_limits[i].limit &&
2659 			    pool_sethardlimit(pf_pool_limits[i].pp,
2660 			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2661 				PF_UNLOCK();
2662 				NET_UNLOCK();
2663 				free(table, M_TEMP, sizeof(*table));
2664 				free(ioe, M_TEMP, sizeof(*ioe));
2665 				error = EBUSY;
2666 				goto fail; /* really bad */
2667 			}
2668 			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2669 		}
2670 		for (i = 0; i < PFTM_MAX; i++) {
2671 			int old = pf_default_rule.timeout[i];
2672 
2673 			pf_default_rule.timeout[i] =
2674 			    pf_default_rule_new.timeout[i];
2675 			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2676 			    pf_default_rule.timeout[i] < old)
2677 				task_add(net_tq(0), &pf_purge_task);
2678 		}
2679 		pfi_xcommit();
2680 		pf_trans_set_commit();
2681 		PF_UNLOCK();
2682 		NET_UNLOCK();
2683 		free(table, M_TEMP, sizeof(*table));
2684 		free(ioe, M_TEMP, sizeof(*ioe));
2685 		break;
2686 	}
2687 
2688 	case DIOCGETSRCNODES: {
2689 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2690 		struct pf_src_node	*n, *p, *pstore;
2691 		u_int32_t		 nr = 0;
2692 		size_t			 space = psn->psn_len;
2693 
2694 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2695 
2696 		NET_LOCK();
2697 		PF_LOCK();
2698 		if (space == 0) {
2699 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2700 				nr++;
2701 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2702 			PF_UNLOCK();
2703 			NET_UNLOCK();
2704 			free(pstore, M_TEMP, sizeof(*pstore));
2705 			break;
2706 		}
2707 
2708 		p = psn->psn_src_nodes;
2709 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2710 			int	secs = getuptime(), diff;
2711 
2712 			if ((nr + 1) * sizeof(*p) > psn->psn_len)
2713 				break;
2714 
2715 			memcpy(pstore, n, sizeof(*pstore));
2716 			memset(&pstore->entry, 0, sizeof(pstore->entry));
2717 			pstore->rule.ptr = NULL;
2718 			pstore->kif = NULL;
2719 			pstore->rule.nr = n->rule.ptr->nr;
2720 			pstore->creation = secs - pstore->creation;
2721 			if (pstore->expire > secs)
2722 				pstore->expire -= secs;
2723 			else
2724 				pstore->expire = 0;
2725 
2726 			/* adjust the connection rate estimate */
2727 			diff = secs - n->conn_rate.last;
2728 			if (diff >= n->conn_rate.seconds)
2729 				pstore->conn_rate.count = 0;
2730 			else
2731 				pstore->conn_rate.count -=
2732 				    n->conn_rate.count * diff /
2733 				    n->conn_rate.seconds;
2734 
2735 			error = copyout(pstore, p, sizeof(*p));
2736 			if (error) {
2737 				PF_UNLOCK();
2738 				NET_UNLOCK();
2739 				free(pstore, M_TEMP, sizeof(*pstore));
2740 				goto fail;
2741 			}
2742 			p++;
2743 			nr++;
2744 		}
2745 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2746 
2747 		PF_UNLOCK();
2748 		NET_UNLOCK();
2749 		free(pstore, M_TEMP, sizeof(*pstore));
2750 		break;
2751 	}
2752 
2753 	case DIOCCLRSRCNODES: {
2754 		struct pf_src_node	*n;
2755 		struct pf_state		*state;
2756 
2757 		NET_LOCK();
2758 		PF_LOCK();
2759 		PF_STATE_ENTER_WRITE();
2760 		RB_FOREACH(state, pf_state_tree_id, &tree_id)
2761 			pf_src_tree_remove_state(state);
2762 		PF_STATE_EXIT_WRITE();
2763 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2764 			n->expire = 1;
2765 		pf_purge_expired_src_nodes();
2766 		PF_UNLOCK();
2767 		NET_UNLOCK();
2768 		break;
2769 	}
2770 
2771 	case DIOCKILLSRCNODES: {
2772 		struct pf_src_node	*sn;
2773 		struct pf_state		*s;
2774 		struct pfioc_src_node_kill *psnk =
2775 		    (struct pfioc_src_node_kill *)addr;
2776 		u_int			killed = 0;
2777 
2778 		NET_LOCK();
2779 		PF_LOCK();
2780 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2781 			if (pf_match_addr(psnk->psnk_src.neg,
2782 				&psnk->psnk_src.addr.v.a.addr,
2783 				&psnk->psnk_src.addr.v.a.mask,
2784 				&sn->addr, sn->af) &&
2785 			    pf_match_addr(psnk->psnk_dst.neg,
2786 				&psnk->psnk_dst.addr.v.a.addr,
2787 				&psnk->psnk_dst.addr.v.a.mask,
2788 				&sn->raddr, sn->af)) {
2789 				/* Handle state to src_node linkage */
2790 				if (sn->states != 0) {
2791 					PF_ASSERT_LOCKED();
2792 					PF_STATE_ENTER_WRITE();
2793 					RB_FOREACH(s, pf_state_tree_id,
2794 					   &tree_id)
2795 						pf_state_rm_src_node(s, sn);
2796 					PF_STATE_EXIT_WRITE();
2797 				}
2798 				sn->expire = 1;
2799 				killed++;
2800 			}
2801 		}
2802 
2803 		if (killed > 0)
2804 			pf_purge_expired_src_nodes();
2805 
2806 		psnk->psnk_killed = killed;
2807 		PF_UNLOCK();
2808 		NET_UNLOCK();
2809 		break;
2810 	}
2811 
2812 	case DIOCSETHOSTID: {
2813 		u_int32_t	*hostid = (u_int32_t *)addr;
2814 
2815 		NET_LOCK();
2816 		PF_LOCK();
2817 		if (*hostid == 0)
2818 			pf_trans_set.hostid = arc4random();
2819 		else
2820 			pf_trans_set.hostid = *hostid;
2821 		pf_trans_set.mask |= PF_TSET_HOSTID;
2822 		PF_UNLOCK();
2823 		NET_UNLOCK();
2824 		break;
2825 	}
2826 
2827 	case DIOCOSFPFLUSH:
2828 		pf_osfp_flush();
2829 		break;
2830 
2831 	case DIOCIGETIFACES: {
2832 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2833 
2834 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2835 			error = ENODEV;
2836 			break;
2837 		}
2838 		NET_LOCK();
2839 		PF_LOCK();
2840 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2841 		    &io->pfiio_size);
2842 		PF_UNLOCK();
2843 		NET_UNLOCK();
2844 		break;
2845 	}
2846 
2847 	case DIOCSETIFFLAG: {
2848 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2849 
2850 		NET_LOCK();
2851 		PF_LOCK();
2852 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2853 		PF_UNLOCK();
2854 		NET_UNLOCK();
2855 		break;
2856 	}
2857 
2858 	case DIOCCLRIFFLAG: {
2859 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2860 
2861 		NET_LOCK();
2862 		PF_LOCK();
2863 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2864 		PF_UNLOCK();
2865 		NET_UNLOCK();
2866 		break;
2867 	}
2868 
2869 	case DIOCSETREASS: {
2870 		u_int32_t	*reass = (u_int32_t *)addr;
2871 
2872 		NET_LOCK();
2873 		PF_LOCK();
2874 		pf_trans_set.reass = *reass;
2875 		pf_trans_set.mask |= PF_TSET_REASS;
2876 		PF_UNLOCK();
2877 		NET_UNLOCK();
2878 		break;
2879 	}
2880 
2881 	case DIOCSETSYNFLWATS: {
2882 		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2883 
2884 		NET_LOCK();
2885 		PF_LOCK();
2886 		error = pf_syncookies_setwats(io->hiwat, io->lowat);
2887 		PF_UNLOCK();
2888 		NET_UNLOCK();
2889 		break;
2890 	}
2891 
2892 	case DIOCGETSYNFLWATS: {
2893 		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2894 
2895 		NET_LOCK();
2896 		PF_LOCK();
2897 		error = pf_syncookies_getwats(io);
2898 		PF_UNLOCK();
2899 		NET_UNLOCK();
2900 		break;
2901 	}
2902 
2903 	case DIOCSETSYNCOOKIES: {
2904 		u_int8_t	*mode = (u_int8_t *)addr;
2905 
2906 		NET_LOCK();
2907 		PF_LOCK();
2908 		error = pf_syncookies_setmode(*mode);
2909 		PF_UNLOCK();
2910 		NET_UNLOCK();
2911 		break;
2912 	}
2913 
2914 	default:
2915 		error = ENODEV;
2916 		break;
2917 	}
2918 fail:
2919 	return (error);
2920 }
2921 
2922 void
2923 pf_trans_set_commit(void)
2924 {
2925 	if (pf_trans_set.mask & PF_TSET_STATUSIF)
2926 		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2927 	if (pf_trans_set.mask & PF_TSET_DEBUG)
2928 		pf_status.debug = pf_trans_set.debug;
2929 	if (pf_trans_set.mask & PF_TSET_HOSTID)
2930 		pf_status.hostid = pf_trans_set.hostid;
2931 	if (pf_trans_set.mask & PF_TSET_REASS)
2932 		pf_status.reass = pf_trans_set.reass;
2933 }
2934 
2935 void
2936 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2937 {
2938 	memmove(to, from, sizeof(*to));
2939 	to->kif = NULL;
2940 	to->addr.p.tbl = NULL;
2941 }
2942 
2943 int
2944 pf_validate_range(u_int8_t op, u_int16_t port[2])
2945 {
2946 	u_int16_t a = ntohs(port[0]);
2947 	u_int16_t b = ntohs(port[1]);
2948 
2949 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
2950 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
2951 	    (op == PF_OP_XRG && a > b))    /* 34<>22, i.e. all */
2952 		return 1;
2953 	return 0;
2954 }
2955 
/*
 * Copy a rule supplied by userland (from) into a kernel rule (to),
 * validating port ranges and resolving symbolic names (interfaces,
 * queues, tables, tags) into kernel references.  Returns 0 on success,
 * EINVAL on malformed input, or EBUSY when a referenced object (rtable,
 * queue, tag) cannot be obtained.
 *
 * NOTE(review): the strlcpy() calls below assume the userland strings
 * are NUL-terminated within their fields — confirm the ioctl path
 * guarantees this before the structure reaches here.
 * NOTE(review): on an error return, references already attached to
 * 'to' (kifs, overload table) are presumably released by the caller;
 * verify against the DIOCADDRULE error path.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	/* Struct-copy src/dst, then drop userland table pointers. */
	to->src = from->src;
	to->src.addr.p.tbl = NULL;
	to->dst = from->dst;
	to->dst.addr.p.tbl = NULL;

	if (pf_validate_range(to->src.port_op, to->src.port))
		return (EINVAL);
	if (pf_validate_range(to->dst.port_op, to->dst.port))
		return (EINVAL);

	/* XXX union skip[] */

	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* Copy the pools; pointer members are cleared and re-resolved below. */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	if (pf_validate_range(to->rdr.port_op, to->rdr.proxy_port))
		return (EINVAL);

	/* Resolve interface names to kernel interface references. */
	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);
	if (to->overload_tblname[0]) {
		/* Attach (and activate) the overload table in this ruleset. */
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	/* Routing table must exist; rdomain must be a valid table id. */
	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain != -1 && (to->onrdomain < 0 ||
	    to->onrdomain > RT_TABLEID_MAX))
		return (EINVAL);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
	pf_init_threshold(&to->pktrate, from->pktrate.limit,
	    from->pktrate.seconds);

	/* Map queue names to ids; pqid defaults to qid when unset. */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	/* Allocate (or reference) tag ids for the tag names, if any. */
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->delay = from->delay;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* Without logging requested, the log interface is meaningless. */
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert.type = from->divert.type;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}
3096 
3097 int
3098 pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
3099 {
3100 	struct pf_status	pfs;
3101 
3102 	NET_RLOCK_IN_IOCTL();
3103 	PF_LOCK();
3104 	memcpy(&pfs, &pf_status, sizeof(struct pf_status));
3105 	pfi_update_status(pfs.ifname, &pfs);
3106 	PF_UNLOCK();
3107 	NET_RUNLOCK_IN_IOCTL();
3108 
3109 	return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
3110 }
3111