xref: /openbsd-src/sys/net/pf_ioctl.c (revision 24bb5fcea3ed904bc467217bdaadb5dfc618d5bf)
1 /*	$OpenBSD: pf_ioctl.c,v 1.365 2021/06/23 06:53:52 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/mbuf.h>
45 #include <sys/filio.h>
46 #include <sys/fcntl.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/kernel.h>
50 #include <sys/time.h>
51 #include <sys/timeout.h>
52 #include <sys/pool.h>
53 #include <sys/malloc.h>
54 #include <sys/proc.h>
55 #include <sys/rwlock.h>
56 #include <sys/syslog.h>
57 #include <uvm/uvm_extern.h>
58 
59 #include <crypto/md5.h>
60 
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/route.h>
64 #include <net/hfsc.h>
65 #include <net/fq_codel.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/ip.h>
69 #include <netinet/in_pcb.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_icmp.h>
72 #include <netinet/tcp.h>
73 #include <netinet/udp.h>
74 
75 #ifdef INET6
76 #include <netinet/ip6.h>
77 #include <netinet/icmp6.h>
78 #endif /* INET6 */
79 
80 #include <net/pfvar.h>
81 #include <net/pfvar_priv.h>
82 
83 #if NPFSYNC > 0
84 #include <netinet/ip_ipsp.h>
85 #include <net/if_pfsync.h>
86 #endif /* NPFSYNC > 0 */
87 
88 struct pool		 pf_tag_pl;
89 
90 void			 pfattach(int);
91 void			 pf_thread_create(void *);
92 int			 pfopen(dev_t, int, int, struct proc *);
93 int			 pfclose(dev_t, int, int, struct proc *);
94 int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
95 int			 pf_begin_rules(u_int32_t *, const char *);
96 int			 pf_rollback_rules(u_int32_t, char *);
97 void			 pf_remove_queues(void);
98 int			 pf_commit_queues(void);
99 void			 pf_free_queues(struct pf_queuehead *);
100 void			 pf_calc_chksum(struct pf_ruleset *);
101 void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
102 void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
103 int			 pf_commit_rules(u_int32_t, char *);
104 int			 pf_addr_setup(struct pf_ruleset *,
105 			    struct pf_addr_wrap *, sa_family_t);
106 int			 pf_kif_setup(char *, struct pfi_kif **);
107 void			 pf_addr_copyout(struct pf_addr_wrap *);
108 void			 pf_trans_set_commit(void);
109 void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
110 int			 pf_validate_range(u_int8_t, u_int16_t[2]);
111 int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
112 			    struct pf_ruleset *);
113 u_int16_t		 pf_qname2qid(char *, int);
114 void			 pf_qid2qname(u_int16_t, char *);
115 void			 pf_qid_unref(u_int16_t);
116 int			 pf_states_clr(struct pfioc_state_kill *);
117 int			 pf_states_get(struct pfioc_states *);
118 
119 struct pf_rule		 pf_default_rule, pf_default_rule_new;
120 
121 struct {
122 	char		statusif[IFNAMSIZ];
123 	u_int32_t	debug;
124 	u_int32_t	hostid;
125 	u_int32_t	reass;
126 	u_int32_t	mask;
127 } pf_trans_set;
128 
129 #define	PF_TSET_STATUSIF	0x01
130 #define	PF_TSET_DEBUG		0x02
131 #define	PF_TSET_HOSTID		0x04
132 #define	PF_TSET_REASS		0x08
133 
134 #define	TAGID_MAX	 50000
135 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
136 				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
137 
138 /*
139  * pf_lock protects consistency of PF data structures, which don't have
140  * their dedicated lock yet. The pf_lock currently protects:
141  *	- rules,
142  *	- radix tables,
143  *	- source nodes
144  * All callers must grab pf_lock exclusively.
145  *
146  * pf_state_lock protects consistency of state table. Packets, which do state
147  * look up grab the lock as readers. If packet must create state, then it must
148  * grab the lock as writer. Whenever packet creates state it grabs pf_lock
149  * first then it locks pf_state_lock as the writer.
150  */
151 struct rwlock		 pf_lock = RWLOCK_INITIALIZER("pf_lock");
152 struct rwlock		 pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
153 
154 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
155 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
156 #endif
157 u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
158 void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
159 void			 tag_unref(struct pf_tags *, u_int16_t);
160 int			 pf_rtlabel_add(struct pf_addr_wrap *);
161 void			 pf_rtlabel_remove(struct pf_addr_wrap *);
162 void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
163 
164 
/*
 * pfattach: one-time initialization of the pf subsystem at attach time.
 * Sets up the memory pools, helper subsystems (queueing, tables,
 * interfaces, OS fingerprinting, syncookies), the default rule with its
 * timeouts, and the initial pf_status.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* backing pools for all dynamically allocated pf objects */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
	    IPL_SOFTNET, 0, "pfrule", NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
	    IPL_SOFTNET, 0, "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
	    IPL_SOFTNET, 0, "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
	    IPL_SOFTNET, 0, "pfstate", NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
	    IPL_SOFTNET, 0, "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
	    IPL_SOFTNET, 0, "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
	    IPL_SOFTNET, 0, "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
	    IPL_SOFTNET, 0, "pfqueue", NULL);
	pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
	    IPL_SOFTNET, 0, "pftag", NULL);
	pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
	    IPL_SOFTNET, 0, "pfpktdelay", NULL);

	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();
	pf_syncookies_init();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* use a smaller table-entry limit on machines with <= 100MB RAM */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* two queue lists: one active, one staged for the next commit */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type =  PF_ADDR_NONE;
	pf_default_rule.nat.addr.type =  PF_ADDR_NONE;
	pf_default_rule.route.addr.type =  PF_ADDR_NONE;

	pf_normalize_init();
	memset(&pf_status, 0, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();
}
254 
255 int
256 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
257 {
258 	if (minor(dev) >= 1)
259 		return (ENXIO);
260 	return (0);
261 }
262 
263 int
264 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
265 {
266 	if (minor(dev) >= 1)
267 		return (ENXIO);
268 	return (0);
269 }
270 
/*
 * Release a rule.  If 'rulequeue' is given, the rule is first unlinked
 * from that queue.  The rule itself (and its references to tags, route
 * labels, dynamic addresses, tables and interfaces) is only freed once
 * it is unlinked and no states or source nodes reference it any more.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* mark the rule as unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* defer the actual free while states/src nodes still use the rule */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	/* tables were not detached above when the rule was still referenced */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_remove_anchor(rule);
	pool_put(&pf_rule_pl, rule);
}
323 
/*
 * Remove a single rule from its active ruleset, renumber and recompute
 * skip steps for the remaining rules, and refresh the checksum when the
 * main ruleset was changed.
 */
void
pf_purge_rule(struct pf_rule *rule)
{
	u_int32_t		 nr = 0;
	struct pf_ruleset	*ruleset;

	KASSERT((rule != NULL) && (rule->ruleset != NULL));
	ruleset = rule->ruleset;

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	/* renumber the remaining rules */
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);
	pf_remove_if_empty_ruleset(ruleset);

	if (ruleset == &pf_main_ruleset)
		pf_calc_chksum(ruleset);
}
344 
/*
 * Look up - or, with 'create' set, allocate - the numeric id for a tag
 * name on 'head'.  Returns the id with an additional reference taken,
 * or 0 on failure (name unknown and !create, id space exhausted, or
 * pool allocation failure).
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	/*
	 * The list is kept sorted by id (see the insert below), so the
	 * first gap in the sequence is the lowest free id; p then points
	 * at the entry the new one has to be inserted before.
	 */
	TAILQ_FOREACH(p, head, entries) {
		if (p->tag != new_tagid)
			break;
		new_tagid = p->tag + 1;
	}

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
391 
392 void
393 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
394 {
395 	struct pf_tagname	*tag;
396 
397 	TAILQ_FOREACH(tag, head, entries)
398 		if (tag->tag == tagid) {
399 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
400 			return;
401 		}
402 }
403 
404 void
405 tag_unref(struct pf_tags *head, u_int16_t tag)
406 {
407 	struct pf_tagname	*p, *next;
408 
409 	if (tag == 0)
410 		return;
411 
412 	TAILQ_FOREACH_SAFE(p, head, entries, next) {
413 		if (tag == p->tag) {
414 			if (--p->ref == 0) {
415 				TAILQ_REMOVE(head, p, entries);
416 				pool_put(&pf_tag_pl, p);
417 			}
418 			break;
419 		}
420 	}
421 }
422 
/* Map a tag name to its id in the global pf_tags list (see tagname2tag()). */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}
428 
/* Copy the name of tag 'tagid' from the global pf_tags list into p. */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
434 
435 void
436 pf_tag_ref(u_int16_t tag)
437 {
438 	struct pf_tagname *t;
439 
440 	TAILQ_FOREACH(t, &pf_tags, entries)
441 		if (t->tag == tag)
442 			break;
443 	if (t != NULL)
444 		t->ref++;
445 }
446 
/* Drop one reference on tag 'tag' in the global pf_tags list. */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
452 
453 int
454 pf_rtlabel_add(struct pf_addr_wrap *a)
455 {
456 	if (a->type == PF_ADDR_RTLABEL &&
457 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
458 		return (-1);
459 	return (0);
460 }
461 
462 void
463 pf_rtlabel_remove(struct pf_addr_wrap *a)
464 {
465 	if (a->type == PF_ADDR_RTLABEL)
466 		rtlabel_unref(a->v.rtlabel);
467 }
468 
469 void
470 pf_rtlabel_copyout(struct pf_addr_wrap *a)
471 {
472 	const char	*name;
473 
474 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
475 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
476 			strlcpy(a->v.rtlabelname, "?",
477 			    sizeof(a->v.rtlabelname));
478 		else
479 			strlcpy(a->v.rtlabelname, name,
480 			    sizeof(a->v.rtlabelname));
481 	}
482 }
483 
/* Map a queue name to its id in the pf_qids list (see tagname2tag()). */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}
489 
/* Copy the name of queue id 'qid' from the pf_qids list into p. */
void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}
495 
496 void
497 pf_qid_unref(u_int16_t qid)
498 {
499 	tag_unref(&pf_qids, (u_int16_t)qid);
500 }
501 
502 int
503 pf_begin_rules(u_int32_t *ticket, const char *anchor)
504 {
505 	struct pf_ruleset	*rs;
506 	struct pf_rule		*rule;
507 
508 	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
509 		return (EINVAL);
510 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
511 		pf_rm_rule(rs->rules.inactive.ptr, rule);
512 		rs->rules.inactive.rcount--;
513 	}
514 	*ticket = ++rs->rules.inactive.ticket;
515 	rs->rules.inactive.open = 1;
516 	return (0);
517 }
518 
519 int
520 pf_rollback_rules(u_int32_t ticket, char *anchor)
521 {
522 	struct pf_ruleset	*rs;
523 	struct pf_rule		*rule;
524 
525 	rs = pf_find_ruleset(anchor);
526 	if (rs == NULL || !rs->rules.inactive.open ||
527 	    rs->rules.inactive.ticket != ticket)
528 		return (0);
529 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
530 		pf_rm_rule(rs->rules.inactive.ptr, rule);
531 		rs->rules.inactive.rcount--;
532 	}
533 	rs->rules.inactive.open = 0;
534 
535 	/* queue defs only in the main ruleset */
536 	if (anchor[0])
537 		return (0);
538 
539 	pf_free_queues(pf_queues_inactive);
540 
541 	return (0);
542 }
543 
544 void
545 pf_free_queues(struct pf_queuehead *where)
546 {
547 	struct pf_queuespec	*q, *qtmp;
548 
549 	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
550 		TAILQ_REMOVE(where, q, entries);
551 		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
552 		pool_put(&pf_queue_pl, q);
553 	}
554 }
555 
556 void
557 pf_remove_queues(void)
558 {
559 	struct pf_queuespec	*q;
560 	struct ifnet		*ifp;
561 
562 	/* put back interfaces in normal queueing mode */
563 	TAILQ_FOREACH(q, pf_queues_active, entries) {
564 		if (q->parent_qid != 0)
565 			continue;
566 
567 		ifp = q->kif->pfik_ifp;
568 		if (ifp == NULL)
569 			continue;
570 
571 		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
572 	}
573 }
574 
/*
 * Temporary per-interface bookkeeping used while pf_create_queues()
 * builds a new queue configuration: ties an interface to the chosen
 * ifq/pfq operations and the traffic conditioner state allocated
 * for it.
 */
struct pf_queue_if {
	struct ifnet		*ifp;
	const struct ifq_ops	*ifqops;
	const struct pfq_ops	*pfqops;
	void			*disc;		/* conditioner private data */
	struct pf_queue_if	*next;		/* singly-linked list */
};
582 
583 static inline struct pf_queue_if *
584 pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
585 {
586 	struct pf_queue_if *qif = list;
587 
588 	while (qif != NULL) {
589 		if (qif->ifp == ifp)
590 			return (qif);
591 
592 		qif = qif->next;
593 	}
594 
595 	return (qif);
596 }
597 
/*
 * Instantiate the active queue definitions on their interfaces: a
 * traffic conditioner (HFSC or FQ-CoDel, depending on the root class
 * flags) is allocated per root-queue interface, every queue is added
 * to its interface's conditioner, and only on full success are the
 * new disciplines attached to the interfaces.  Interfaces that had a
 * root queue in the old (inactive) list but not in the new one are
 * put back into the default priq discipline.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_queue_if		*list = NULL, *qif;
	int			 error;

	/*
	 * Find root queues and allocate traffic conditioner
	 * private data for these interfaces
	 */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* M_WAITOK: this allocation cannot fail */
		qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
		qif->ifp = ifp;

		if (q->flags & PFQS_ROOTCLASS) {
			qif->ifqops = ifq_hfsc_ops;
			qif->pfqops = pfq_hfsc_ops;
		} else {
			qif->ifqops = ifq_fqcodel_ops;
			qif->pfqops = pfq_fqcodel_ops;
		}

		qif->disc = qif->pfqops->pfq_alloc(ifp);

		qif->next = list;
		list = qif;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* every queue's interface was picked up by the pass above */
		qif = pf_ifp2q(list, ifp);
		KASSERT(qif != NULL);

		error = qif->pfqops->pfq_addqueue(qif->disc, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* interface still has a root queue in the new config */
		qif = pf_ifp2q(list, ifp);
		if (qif != NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		ifp = qif->ifp;

		ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (0);

error:
	/* free everything allocated so far; interfaces were not touched */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		qif->pfqops->pfq_free(qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (error);
}
689 
/*
 * Swap the staged (inactive) queue list in as the active one and
 * instantiate it on the interfaces.  On failure the swap is undone
 * and the previous configuration stays in effect.
 */
int
pf_commit_queues(void)
{
	struct pf_queuehead	*qswap;
	int error;

	/* swap */
	qswap = pf_queues_active;
	pf_queues_active = pf_queues_inactive;
	pf_queues_inactive = qswap;

	error = pf_create_queues();
	if (error != 0) {
		/* undo the swap; the old queues remain attached */
		pf_queues_inactive = pf_queues_active;
		pf_queues_active = qswap;
		return (error);
	}

	/* the old (now inactive) queue definitions are no longer needed */
	pf_free_queues(pf_queues_inactive);

	return (0);
}
712 
713 const struct pfq_ops *
714 pf_queue_manager(struct pf_queuespec *q)
715 {
716 	if (q->flags & PFQS_FLOWQUEUE)
717 		return pfq_fqcodel_ops;
718 	return (/* pfq_default_ops */ NULL);
719 }
720 
/*
 * Helpers to fold rule fields into the ruleset MD5 checksum.  The
 * HTONL/HTONS variants convert to network byte order first so the
 * resulting checksum does not depend on host endianness.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
736 
/*
 * Fold the match-relevant fields of a rule address into the ruleset
 * checksum.  The update order determines the checksum value - keep it
 * stable.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			/* optimizer-generated tables are excluded */
			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
			    strlen(PF_OPTIMIZER_TABLE_PFX)))
				PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
		case PF_ADDR_RTLABEL:
			PF_MD5_UPD(pfr, addr.v.rtlabelname);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
766 
/*
 * Fold all checksum-relevant fields of a rule into ctx.  Multi-byte
 * integers go through the HTONL/HTONS temporaries x and y so the
 * checksum is endian-independent.  The field order determines the
 * checksum value - keep it stable.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
806 
/*
 * Commit the rule transaction identified by 'ticket' on 'anchor':
 * swap the staged inactive list in as the active ruleset, purge the
 * previous rules and, for the main ruleset, commit the staged queue
 * definitions as well.  Returns EBUSY on a stale or unknown ticket.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	u_int32_t		 old_rcount;

	/* Make sure any expired rules get removed from active rules first. */
	pf_purge_expired_rules();

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* the checksum covers the main ruleset only */
	if (rs == &pf_main_ruleset)
		pf_calc_chksum(rs);

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}
851 
852 void
853 pf_calc_chksum(struct pf_ruleset *rs)
854 {
855 	MD5_CTX			 ctx;
856 	struct pf_rule		*rule;
857 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
858 
859 	MD5Init(&ctx);
860 
861 	if (rs->rules.inactive.rcount) {
862 		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
863 			pf_hash_rule(&ctx, rule);
864 		}
865 	}
866 
867 	MD5Final(digest, &ctx);
868 	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
869 }
870 
871 int
872 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
873     sa_family_t af)
874 {
875 	if (pfi_dynaddr_setup(addr, af) ||
876 	    pf_tbladdr_setup(ruleset, addr) ||
877 	    pf_rtlabel_add(addr))
878 		return (EINVAL);
879 
880 	return (0);
881 }
882 
883 int
884 pf_kif_setup(char *ifname, struct pfi_kif **kif)
885 {
886 	if (ifname[0]) {
887 		*kif = pfi_kif_get(ifname);
888 		if (*kif == NULL)
889 			return (EINVAL);
890 
891 		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
892 	} else
893 		*kif = NULL;
894 
895 	return (0);
896 }
897 
/*
 * Fill in the userland-visible parts of an address wrapper (dynamic
 * interface address, table name, route label) before copyout.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
905 
/*
 * DIOCCLRSTATES backend: remove every state, or only those bound to
 * psk->psk_ifname when it is set.  The number of removed states is
 * reported back in psk->psk_killed.  Lock order: net lock, then the
 * state list rwlock (read), then pf_lock, then the state write lock.
 */
int
pf_states_clr(struct pfioc_state_kill *psk)
{
	struct pf_state		*s, *nexts;
	struct pf_state		*head, *tail;
	u_int			 killed = 0;
	int			 error;

	NET_LOCK();

	/* lock against the gc removing an item from the list */
	error = rw_enter(&pf_state_list.pfs_rwl, RW_READ|RW_INTR);
	if (error != 0)
		goto unlock;

	/* get a snapshot view of the ends of the list to traverse between */
	mtx_enter(&pf_state_list.pfs_mtx);
	head = TAILQ_FIRST(&pf_state_list.pfs_list);
	tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue);
	mtx_leave(&pf_state_list.pfs_mtx);

	s = NULL;
	nexts = head;

	PF_LOCK();
	PF_STATE_ENTER_WRITE();

	while (s != tail) {
		s = nexts;
		nexts = TAILQ_NEXT(s, entry_list);

		/* skip states that have already been unlinked */
		if (s->timeout == PFTM_UNLINKED)
			continue;

		if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
		    s->kif->pfik_name)) {
#if NPFSYNC > 0
			/* don't send out individual delete messages */
			SET(s->state_flags, PFSTATE_NOSYNC);
#endif	/* NPFSYNC > 0 */
			pf_remove_state(s);
			killed++;
		}
	}

	PF_STATE_EXIT_WRITE();
#if NPFSYNC > 0
	/* one clear-states message instead of per-state deletes */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif	/* NPFSYNC > 0 */
	PF_UNLOCK();
	rw_exit(&pf_state_list.pfs_rwl);

	psk->psk_killed = killed;
unlock:
	NET_UNLOCK();

	return (error);
}
964 
/*
 * DIOCGETSTATES backend: export states into the userland buffer at
 * ps->ps_states, stopping when the buffer is full.  With ps_len == 0
 * only the space needed for all current states is reported back in
 * ps_len; otherwise ps_len is updated to the size actually used.
 */
int
pf_states_get(struct pfioc_states *ps)
{
	struct pf_state		*head, *tail;
	struct pf_state		*next, *state;
	struct pfsync_state	*p, pstore;
	u_int32_t		 nr = 0;
	int			 error;

	if (ps->ps_len == 0) {
		nr = pf_status.states;
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		return (0);
	}

	p = ps->ps_states;

	/* lock against the gc removing an item from the list */
	error = rw_enter(&pf_state_list.pfs_rwl, RW_READ|RW_INTR);
	if (error != 0)
		return (error);

	/* get a snapshot view of the ends of the list to traverse between */
	mtx_enter(&pf_state_list.pfs_mtx);
	head = TAILQ_FIRST(&pf_state_list.pfs_list);
	tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue);
	mtx_leave(&pf_state_list.pfs_mtx);

	state = NULL;
	next = head;

	while (state != tail) {
		state = next;
		next = TAILQ_NEXT(state, entry_list);

		/* skip states that have already been unlinked */
		if (state->timeout == PFTM_UNLINKED)
			continue;

		/* stop once the next record would overflow the buffer */
		if ((nr+1) * sizeof(*p) > ps->ps_len)
			break;

		pf_state_export(&pstore, state);
		error = copyout(&pstore, p, sizeof(*p));
		if (error)
			goto fail;

		p++;
		nr++;
	}
	ps->ps_len = sizeof(struct pfsync_state) * nr;

fail:
	rw_exit(&pf_state_list.pfs_rwl);

	return (error);
}
1021 
1022 int
1023 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1024 {
1025 	int			 error = 0;
1026 
1027 	/* XXX keep in sync with switch() below */
1028 	if (securelevel > 1)
1029 		switch (cmd) {
1030 		case DIOCGETRULES:
1031 		case DIOCGETRULE:
1032 		case DIOCGETSTATE:
1033 		case DIOCSETSTATUSIF:
1034 		case DIOCGETSTATUS:
1035 		case DIOCCLRSTATUS:
1036 		case DIOCNATLOOK:
1037 		case DIOCSETDEBUG:
1038 		case DIOCGETSTATES:
1039 		case DIOCGETTIMEOUT:
1040 		case DIOCGETLIMIT:
1041 		case DIOCGETRULESETS:
1042 		case DIOCGETRULESET:
1043 		case DIOCGETQUEUES:
1044 		case DIOCGETQUEUE:
1045 		case DIOCGETQSTATS:
1046 		case DIOCRGETTABLES:
1047 		case DIOCRGETTSTATS:
1048 		case DIOCRCLRTSTATS:
1049 		case DIOCRCLRADDRS:
1050 		case DIOCRADDADDRS:
1051 		case DIOCRDELADDRS:
1052 		case DIOCRSETADDRS:
1053 		case DIOCRGETADDRS:
1054 		case DIOCRGETASTATS:
1055 		case DIOCRCLRASTATS:
1056 		case DIOCRTSTADDRS:
1057 		case DIOCOSFPGET:
1058 		case DIOCGETSRCNODES:
1059 		case DIOCCLRSRCNODES:
1060 		case DIOCIGETIFACES:
1061 		case DIOCSETIFFLAG:
1062 		case DIOCCLRIFFLAG:
1063 		case DIOCGETSYNFLWATS:
1064 			break;
1065 		case DIOCRCLRTABLES:
1066 		case DIOCRADDTABLES:
1067 		case DIOCRDELTABLES:
1068 		case DIOCRSETTFLAGS:
1069 			if (((struct pfioc_table *)addr)->pfrio_flags &
1070 			    PFR_FLAG_DUMMY)
1071 				break; /* dummy operation ok */
1072 			return (EPERM);
1073 		default:
1074 			return (EPERM);
1075 		}
1076 
	/*
	 * Descriptor opened without FWRITE: only non-modifying ioctls
	 * are allowed.  Dummy table operations are let through, but the
	 * local flags copy is upgraded to FWRITE so they take the same
	 * path as real writes below.
	 */
	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCGETQUEUES:
		case DIOCGETQUEUE:
		case DIOCGETQSTATS:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGETSYNFLWATS:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			/*
			 * PF_GET_CLR_CNTR zeroes the rule counters as a
			 * side effect, so it is a write despite the name.
			 */
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}
1124 
1125 	switch (cmd) {
1126 
	/* Enable packet filtering; EEXIST if pf is already running. */
	case DIOCSTART:
		NET_LOCK();
		PF_LOCK();
		if (pf_status.running)
			error = EEXIST;
		else {
			pf_status.running = 1;
			pf_status.since = getuptime();
			/*
			 * First start since boot: seed the upper 32 bits of
			 * the state id counter from the current time.
			 */
			if (pf_status.stateid == 0) {
				pf_status.stateid = gettime();
				pf_status.stateid = pf_status.stateid << 32;
			}
			/* kick off the periodic purge timeout */
			timeout_add_sec(&pf_purge_to, 1);
			pf_create_queues();
			DPFPRINTF(LOG_NOTICE, "pf: started");
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;

	/* Disable packet filtering; ENOENT if pf is not running. */
	case DIOCSTOP:
		NET_LOCK();
		PF_LOCK();
		if (!pf_status.running)
			error = ENOENT;
		else {
			pf_status.running = 0;
			pf_status.since = getuptime();
			pf_remove_queues();
			DPFPRINTF(LOG_NOTICE, "pf: stopped");
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
1161 
	/*
	 * Count the active queue specs and hand out the active ruleset
	 * ticket; userland then iterates with DIOCGETQUEUE.
	 */
	case DIOCGETQUEUES: {
		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		pq->ticket = pf_main_ruleset.rules.active.ticket;

		/* save state to not run over them all each time? */
		qs = TAILQ_FIRST(pf_queues_active);
		while (qs != NULL) {
			qs = TAILQ_NEXT(qs, entries);
			nr++;
		}
		pq->nr = nr;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * Copy out the pq->nr'th active queue spec.  EBUSY if the ticket
	 * is stale (ruleset changed since DIOCGETQUEUES) or nr is out of
	 * range.
	 */
	case DIOCGETQUEUE: {
		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* save state to not run over them all each time? */
		qs = TAILQ_FIRST(pf_queues_active);
		while ((qs != NULL) && (nr++ < pq->nr))
			qs = TAILQ_NEXT(qs, entries);
		if (qs == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pq->queue, qs, sizeof(pq->queue));
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * Like DIOCGETQUEUE, but additionally copies the scheduler
	 * statistics for the selected queue into pq->buf (at most
	 * pq->nbytes; the actual size is written back).
	 */
	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr;
		int			 nbytes;

		NET_LOCK();
		PF_LOCK();
		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;

		/* save state to not run over them all each time? */
		qs = TAILQ_FIRST(pf_queues_active);
		while ((qs != NULL) && (nr++ < pq->nr))
			qs = TAILQ_NEXT(qs, entries);
		if (qs == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pq->queue, qs, sizeof(pq->queue));
		/* It's a root flow queue but is not an HFSC root class */
		if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 &&
		    !(qs->flags & PFQS_ROOTCLASS))
			error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		else
			error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		if (error == 0)
			pq->nbytes = nbytes;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * Append a queue spec to the inactive (under construction) list.
	 * The spec is allocated before the locks are taken: PR_WAITOK
	 * pool_get may sleep.  Every error path must pool_put the spec.
	 */
	case DIOCADDQUEUE: {
		struct pfioc_queue	*q = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;

		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (qs == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		memcpy(qs, &q->queue, sizeof(*qs));
		qs->qid = pf_qname2qid(qs->qname, 1);
		if (qs->qid == 0) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		/* a named parent must already have a qid */
		if (qs->parent[0] && (qs->parent_qid =
		    pf_qname2qid(qs->parent, 0)) == 0) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		qs->kif = pfi_kif_get(qs->ifname);
		if (qs->kif == NULL) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		/* XXX resolve bw percentage specs */
		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);

		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
		PF_UNLOCK();
		NET_UNLOCK();

		break;
	}
1309 
	/*
	 * Append a rule to the inactive ruleset named by pr->anchor.
	 * The rule is pre-allocated outside the locks (PR_WAITOK may
	 * sleep).  Validation failures free the rule via pf_rm_rule or
	 * pool_put and leave the inactive ruleset untouched.
	 */
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;

		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		/* force NUL termination of the copied-in anchor path */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		/* high byte of return_icmp is the ICMP type */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		if (pr->ticket != ruleset->rules.inactive.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
			pf_rm_rule(NULL, rule);
			rule = NULL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		rule->cuid = p->p_ucred->cr_ruid;
		rule->cpid = p->p_p->ps_pid;

		/* only unspecified, IPv4 and (if built) IPv6 are valid */
		switch (rule->af) {
		case 0:
			break;
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			pf_rm_rule(NULL, rule);
			rule = NULL;
			error = EAFNOSUPPORT;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* number the rule after the current inactive tail */
		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

		if (rule->src.addr.type == PF_ADDR_NONE ||
		    rule->dst.addr.type == PF_ADDR_NONE)
			error = EINVAL;

		/* resolve/attach all address specs; any failure => EINVAL */
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		/* route-to requires an explicit direction */
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > IFQ_MAXPRIO ||
		    rule->set_prio[1] > IFQ_MAXPRIO))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
		    rule, entries);
		rule->ruleset = ruleset;
		ruleset->rules.inactive.rcount++;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1417 
	/*
	 * Return the number of rules in the active ruleset of the given
	 * anchor plus the ticket userland must pass to DIOCGETRULE.
	 */
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;

		NET_LOCK();
		PF_LOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules.active.ticket;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * Copy out rule number pr->nr of the active ruleset.  Kernel
	 * pointers are scrubbed from the copy before it reaches
	 * userland.  With action PF_GET_CLR_CNTR the in-kernel rule's
	 * counters are reset afterwards (write access was enforced in
	 * the permission switch above).
	 */
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 i;

		NET_LOCK();
		PF_LOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		if (pr->ticket != ruleset->rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pr->rule, rule, sizeof(struct pf_rule));
		/* don't leak kernel addresses to userland */
		memset(&pr->rule.entries, 0, sizeof(pr->rule.entries));
		pr->rule.kif = NULL;
		pr->rule.nat.kif = NULL;
		pr->rule.rdr.kif = NULL;
		pr->rule.route.kif = NULL;
		pr->rule.rcv_kif = NULL;
		pr->rule.anchor = NULL;
		pr->rule.overload_tbl = NULL;
		/* scale the rate limit back to user-visible units */
		pr->rule.pktrate.limit /= PF_THRESHOLD_MULT;
		memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle));
		pr->rule.ruleset = NULL;
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		pf_addr_copyout(&pr->rule.rdr.addr);
		pf_addr_copyout(&pr->rule.nat.addr);
		pf_addr_copyout(&pr->rule.route.addr);
		/* translate skip-step pointers into rule numbers */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = (u_int32_t)-1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			rule->states_tot = 0;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1515 
	/*
	 * Modify the active ruleset in place: add a rule at head/tail,
	 * before/after rule pcr->nr, or remove rule pcr->nr.
	 * PF_CHANGE_GET_TICKET only bumps and returns the active ticket.
	 * After any change the rules are renumbered, the ticket is
	 * bumped and the skip steps recomputed.
	 */
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		u_int32_t		 nr = 0;

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}

		/*
		 * A rule is allocated unconditionally (even for remove /
		 * get-ticket) so pool_get can sleep outside the locks;
		 * unused ones are returned to the pool below.
		 */
		newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (newrule == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, newrule);
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules.active.ticket;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, newrule);
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules.active.ticket) {
				error = EINVAL;
				PF_UNLOCK();
				NET_UNLOCK();
				pool_put(&pf_rule_pl, newrule);
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				PF_UNLOCK();
				NET_UNLOCK();
				pool_put(&pf_rule_pl, newrule);
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			/*
			 * NOTE(review): pf_rule_copyin()'s return value is
			 * ignored here, unlike in DIOCADDRULE where it is
			 * checked -- confirm upstream whether copy errors
			 * can be silently lost on this path.
			 */
			pf_rule_copyin(&pcr->rule, newrule, ruleset);
			newrule->cuid = p->p_ucred->cr_ruid;
			newrule->cpid = p->p_p->ps_pid;

			switch (newrule->af) {
			case 0:
				break;
			case AF_INET:
				break;
#ifdef INET6
			case AF_INET6:
				break;
#endif /* INET6 */
			default:
				pf_rm_rule(NULL, newrule);
				error = EAFNOSUPPORT;
				PF_UNLOCK();
				NET_UNLOCK();
				goto fail;
			}

			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
				error = EINVAL;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;

			if (error) {
				pf_rm_rule(NULL, newrule);
				PF_UNLOCK();
				NET_UNLOCK();
				break;
			}
		}

		/* locate the anchor rule for the insert/remove position */
		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
			    pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_rm_rule(NULL, newrule);
				error = EINVAL;
				PF_UNLOCK();
				NET_UNLOCK();
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
			ruleset->rules.active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules.active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules.active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules.active.rcount++;
		}

		/* renumber the ruleset from scratch */
		nr = 0;
		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules.active.ticket++;

		pf_calc_skip_steps(ruleset->rules.active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1664 
	case DIOCCLRSTATES:
		error = pf_states_clr((struct pfioc_state_kill *)addr);
		break;

	/*
	 * Kill states, selected one of three ways:
	 *  1. by exact state id/creatorid (psk_pfcmp),
	 *  2. by exact key lookup when af, proto and both ports are
	 *     given with PF_OP_EQ (fast path, both directions), or
	 *  3. by a full scan of the id tree matching the given
	 *     address/port/label/interface criteria.
	 */
	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_item	*si, *sit;
		struct pf_state_key	*sk, key;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;
		const int		 dirs[] = { PF_IN, PF_OUT };
		int			 sidx, didx;

		/* case 1: kill by state id */
		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = pf_status.hostid;
			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
				pf_remove_state(s);
				psk->psk_killed = 1;
			}
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* case 2: exact key lookup, tried in both directions */
		if (psk->psk_af && psk->psk_proto &&
		    psk->psk_src.port_op == PF_OP_EQ &&
		    psk->psk_dst.port_op == PF_OP_EQ) {

			key.af = psk->psk_af;
			key.proto = psk->psk_proto;
			key.rdomain = psk->psk_rdomain;

			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			for (i = 0; i < nitems(dirs); i++) {
				if (dirs[i] == PF_IN) {
					sidx = 0;
					didx = 1;
				} else {
					sidx = 1;
					didx = 0;
				}
				pf_addrcpy(&key.addr[sidx],
				    &psk->psk_src.addr.v.a.addr, key.af);
				pf_addrcpy(&key.addr[didx],
				    &psk->psk_dst.addr.v.a.addr, key.af);
				key.port[sidx] = psk->psk_src.port[0];
				key.port[didx] = psk->psk_dst.port[0];

				sk = RB_FIND(pf_state_tree, &pf_statetbl, &key);
				if (sk == NULL)
					continue;

				/*
				 * Match the key against the right side
				 * (wire vs. stack) of each state; af-
				 * translated states match either side on
				 * the inbound pass.  Optionally restrict
				 * to a named interface.
				 */
				TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit)
					if (((si->s->key[PF_SK_WIRE]->af ==
					    si->s->key[PF_SK_STACK]->af &&
					    sk == (dirs[i] == PF_IN ?
					    si->s->key[PF_SK_WIRE] :
					    si->s->key[PF_SK_STACK])) ||
					    (si->s->key[PF_SK_WIRE]->af !=
					    si->s->key[PF_SK_STACK]->af &&
					    dirs[i] == PF_IN &&
					    (sk == si->s->key[PF_SK_STACK] ||
					    sk == si->s->key[PF_SK_WIRE]))) &&
					    (!psk->psk_ifname[0] ||
					    (si->s->kif != pfi_all &&
					    !strcmp(psk->psk_ifname,
					    si->s->kif->pfik_name)))) {
						pf_remove_state(si->s);
						killed++;
					}
			}
			if (killed)
				psk->psk_killed = killed;
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* case 3: full scan of the state id tree */
		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			/* pick the key side matching the state direction */
			if (s->direction == PF_OUT) {
				sk = s->key[PF_SK_STACK];
				srcaddr = &sk->addr[1];
				dstaddr = &sk->addr[0];
				srcport = sk->port[1];
				dstport = sk->port[0];
			} else {
				sk = s->key[PF_SK_WIRE];
				srcaddr = &sk->addr[0];
				dstaddr = &sk->addr[1];
				srcport = sk->port[0];
				dstport = sk->port[1];
			}
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
			    pf_match_addr(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    srcaddr, sk->af) &&
			    pf_match_addr(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    dstaddr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    srcport)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dstport)) &&
			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
				pf_remove_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
		PF_STATE_EXIT_WRITE();
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1806 
#if NPFSYNC > 0
	/* Import a single state via the pfsync import path. */
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
#endif	/* NPFSYNC > 0 */

	/*
	 * Export one state looked up by id/creatorid.  The state is
	 * referenced under the state read lock so the export can run
	 * after the locks are dropped.
	 */
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		memset(&id_key, 0, sizeof(id_key));
		id_key.id = ps->state.id;
		id_key.creatorid = ps->state.creatorid;

		NET_LOCK();
		PF_STATE_ENTER_READ();
		s = pf_find_state_byid(&id_key);
		s = pf_state_ref(s);
		PF_STATE_EXIT_READ();
		NET_UNLOCK();
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s);
		pf_state_unref(s);
		break;
	}
1849 
	case DIOCGETSTATES:
		error = pf_states_get((struct pfioc_states *)addr);
		break;

	/* Snapshot pf_status, with per-interface counters merged in. */
	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		NET_LOCK();
		PF_LOCK();
		memcpy(s, &pf_status, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * Set the status ("loginterface") interface.  An empty name
	 * clears it immediately; a non-empty name is staged in
	 * pf_trans_set and applied on commit.
	 */
	case DIOCSETSTATUSIF: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		if (pi->pfiio_name[0] == 0) {
			memset(pf_status.ifname, 0, IFNAMSIZ);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
		pf_trans_set.mask |= PF_TSET_STATUSIF;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Clear global (or, if a name is given, per-interface) counters. */
	case DIOCCLRSTATUS: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		/* if ifname is specified, clear counters there only */
		if (pi->pfiio_name[0]) {
			pfi_update_status(pi->pfiio_name, NULL);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		memset(pf_status.counters, 0, sizeof(pf_status.counters));
		memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
		memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
		pf_status.since = getuptime();

		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1905 
	/*
	 * Look up the translated (NAT) addresses/ports for a single
	 * connection.  E2BIG if the key matches more than one state,
	 * ENOENT if it matches none.
	 */
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		switch (pnl->af) {
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			error = EAFNOSUPPORT;
			goto fail;
		}

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/* proto, both addresses and (for TCP/UDP) both ports
		 * are mandatory; rdomain must be in range */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)) ||
		    pnl->rdomain > RT_TABLEID_MAX)
			error = EINVAL;
		else {
			key.af = pnl->af;
			key.proto = pnl->proto;
			key.rdomain = pnl->rdomain;
			pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			NET_LOCK();
			PF_STATE_ENTER_READ();
			state = pf_find_state_all(&key, direction, &m);
			state = pf_state_ref(state);
			PF_STATE_EXIT_READ();
			NET_UNLOCK();

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->key[sidx];
				pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx],
				    sk->af);
				pnl->rsport = sk->port[sidx];
				pf_addrcpy(&pnl->rdaddr, &sk->addr[didx],
				    sk->af);
				pnl->rdport = sk->port[didx];
				pnl->rrdomain = sk->rdomain;
			} else
				error = ENOENT;
			pf_state_unref(state);
		}
		break;
	}
1971 
	/*
	 * Stage a new state timeout value (applied to the default rule
	 * on commit); the current active value is copied back out.
	 */
	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		/* the purge interval must never be zero */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Read one timeout value from the active default rule. */
	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Read one memory-pool hard limit. */
	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * Stage a new pool hard limit (applied on commit).  Rejected if
	 * more items than the new limit are already in use.  The current
	 * active limit is copied back out.
	 */
	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		NET_LOCK();
		PF_LOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
		    pl->limit) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* Fragments reference mbuf clusters. */
		if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		pf_pool_limits[pl->index].limit_new = pl->limit;
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* Stage a new debug log level (applied on commit). */
	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		NET_LOCK();
		PF_LOCK();
		pf_trans_set.debug = *level;
		pf_trans_set.mask |= PF_TSET_DEBUG;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2066 
	/* Count the direct child anchors of the ruleset at pr->path. */
	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		NET_LOCK();
		PF_LOCK();
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pr->nr = 0;
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/*
	 * Return the name of the pr->nr'th child anchor of the ruleset
	 * at pr->path; EBUSY if there is no such child.
	 */
	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pr->name[0] = '\0';
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		PF_UNLOCK();
		NET_UNLOCK();
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
2136 
	/*
	 * Radix-table ioctls.  Each case first validates that userland
	 * was compiled against the same element size (pfrio_esize) and
	 * then forwards to the matching pfr_* routine with
	 * PFR_FLAG_USERIOCTL set to mark the ioctl origin.
	 */
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	/* replace a table's address list wholesale */
	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2333 
2334 	case DIOCRGETASTATS: {
2335 		struct pfioc_table *io = (struct pfioc_table *)addr;
2336 
2337 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2338 			error = ENODEV;
2339 			break;
2340 		}
2341 		NET_LOCK();
2342 		PF_LOCK();
2343 		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2344 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2345 		PF_UNLOCK();
2346 		NET_UNLOCK();
2347 		break;
2348 	}
2349 
2350 	case DIOCRCLRASTATS: {
2351 		struct pfioc_table *io = (struct pfioc_table *)addr;
2352 
2353 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2354 			error = ENODEV;
2355 			break;
2356 		}
2357 		NET_LOCK();
2358 		PF_LOCK();
2359 		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2360 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2361 		    PFR_FLAG_USERIOCTL);
2362 		PF_UNLOCK();
2363 		NET_UNLOCK();
2364 		break;
2365 	}
2366 
2367 	case DIOCRTSTADDRS: {
2368 		struct pfioc_table *io = (struct pfioc_table *)addr;
2369 
2370 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2371 			error = ENODEV;
2372 			break;
2373 		}
2374 		NET_LOCK();
2375 		PF_LOCK();
2376 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2377 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2378 		    PFR_FLAG_USERIOCTL);
2379 		PF_UNLOCK();
2380 		NET_UNLOCK();
2381 		break;
2382 	}
2383 
2384 	case DIOCRINADEFINE: {
2385 		struct pfioc_table *io = (struct pfioc_table *)addr;
2386 
2387 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2388 			error = ENODEV;
2389 			break;
2390 		}
2391 		NET_LOCK();
2392 		PF_LOCK();
2393 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2394 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2395 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2396 		PF_UNLOCK();
2397 		NET_UNLOCK();
2398 		break;
2399 	}
2400 
2401 	case DIOCOSFPADD: {
2402 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2403 		error = pf_osfp_add(io);
2404 		break;
2405 	}
2406 
2407 	case DIOCOSFPGET: {
2408 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2409 		error = pf_osfp_get(io);
2410 		break;
2411 	}
2412 
2413 	case DIOCXBEGIN: {
2414 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2415 		struct pfioc_trans_e	*ioe;
2416 		struct pfr_table	*table;
2417 		int			 i;
2418 
2419 		if (io->esize != sizeof(*ioe)) {
2420 			error = ENODEV;
2421 			goto fail;
2422 		}
2423 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2424 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2425 		NET_LOCK();
2426 		PF_LOCK();
2427 		pf_default_rule_new = pf_default_rule;
2428 		memset(&pf_trans_set, 0, sizeof(pf_trans_set));
2429 		for (i = 0; i < io->size; i++) {
2430 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2431 				PF_UNLOCK();
2432 				NET_UNLOCK();
2433 				free(table, M_TEMP, sizeof(*table));
2434 				free(ioe, M_TEMP, sizeof(*ioe));
2435 				error = EFAULT;
2436 				goto fail;
2437 			}
2438 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2439 			    sizeof(ioe->anchor)) {
2440 				PF_UNLOCK();
2441 				NET_UNLOCK();
2442 				free(table, M_TEMP, sizeof(*table));
2443 				free(ioe, M_TEMP, sizeof(*ioe));
2444 				error = ENAMETOOLONG;
2445 				goto fail;
2446 			}
2447 			switch (ioe->type) {
2448 			case PF_TRANS_TABLE:
2449 				memset(table, 0, sizeof(*table));
2450 				strlcpy(table->pfrt_anchor, ioe->anchor,
2451 				    sizeof(table->pfrt_anchor));
2452 				if ((error = pfr_ina_begin(table,
2453 				    &ioe->ticket, NULL, 0))) {
2454 					PF_UNLOCK();
2455 					NET_UNLOCK();
2456 					free(table, M_TEMP, sizeof(*table));
2457 					free(ioe, M_TEMP, sizeof(*ioe));
2458 					goto fail;
2459 				}
2460 				break;
2461 			case PF_TRANS_RULESET:
2462 				if ((error = pf_begin_rules(&ioe->ticket,
2463 				    ioe->anchor))) {
2464 					PF_UNLOCK();
2465 					NET_UNLOCK();
2466 					free(table, M_TEMP, sizeof(*table));
2467 					free(ioe, M_TEMP, sizeof(*ioe));
2468 					goto fail;
2469 				}
2470 				break;
2471 			default:
2472 				PF_UNLOCK();
2473 				NET_UNLOCK();
2474 				free(table, M_TEMP, sizeof(*table));
2475 				free(ioe, M_TEMP, sizeof(*ioe));
2476 				error = EINVAL;
2477 				goto fail;
2478 			}
2479 			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2480 				PF_UNLOCK();
2481 				NET_UNLOCK();
2482 				free(table, M_TEMP, sizeof(*table));
2483 				free(ioe, M_TEMP, sizeof(*ioe));
2484 				error = EFAULT;
2485 				goto fail;
2486 			}
2487 		}
2488 		PF_UNLOCK();
2489 		NET_UNLOCK();
2490 		free(table, M_TEMP, sizeof(*table));
2491 		free(ioe, M_TEMP, sizeof(*ioe));
2492 		break;
2493 	}
2494 
2495 	case DIOCXROLLBACK: {
2496 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2497 		struct pfioc_trans_e	*ioe;
2498 		struct pfr_table	*table;
2499 		int			 i;
2500 
2501 		if (io->esize != sizeof(*ioe)) {
2502 			error = ENODEV;
2503 			goto fail;
2504 		}
2505 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2506 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2507 		NET_LOCK();
2508 		PF_LOCK();
2509 		for (i = 0; i < io->size; i++) {
2510 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2511 				PF_UNLOCK();
2512 				NET_UNLOCK();
2513 				free(table, M_TEMP, sizeof(*table));
2514 				free(ioe, M_TEMP, sizeof(*ioe));
2515 				error = EFAULT;
2516 				goto fail;
2517 			}
2518 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2519 			    sizeof(ioe->anchor)) {
2520 				PF_UNLOCK();
2521 				NET_UNLOCK();
2522 				free(table, M_TEMP, sizeof(*table));
2523 				free(ioe, M_TEMP, sizeof(*ioe));
2524 				error = ENAMETOOLONG;
2525 				goto fail;
2526 			}
2527 			switch (ioe->type) {
2528 			case PF_TRANS_TABLE:
2529 				memset(table, 0, sizeof(*table));
2530 				strlcpy(table->pfrt_anchor, ioe->anchor,
2531 				    sizeof(table->pfrt_anchor));
2532 				if ((error = pfr_ina_rollback(table,
2533 				    ioe->ticket, NULL, 0))) {
2534 					PF_UNLOCK();
2535 					NET_UNLOCK();
2536 					free(table, M_TEMP, sizeof(*table));
2537 					free(ioe, M_TEMP, sizeof(*ioe));
2538 					goto fail; /* really bad */
2539 				}
2540 				break;
2541 			case PF_TRANS_RULESET:
2542 				if ((error = pf_rollback_rules(ioe->ticket,
2543 				    ioe->anchor))) {
2544 					PF_UNLOCK();
2545 					NET_UNLOCK();
2546 					free(table, M_TEMP, sizeof(*table));
2547 					free(ioe, M_TEMP, sizeof(*ioe));
2548 					goto fail; /* really bad */
2549 				}
2550 				break;
2551 			default:
2552 				PF_UNLOCK();
2553 				NET_UNLOCK();
2554 				free(table, M_TEMP, sizeof(*table));
2555 				free(ioe, M_TEMP, sizeof(*ioe));
2556 				error = EINVAL;
2557 				goto fail; /* really bad */
2558 			}
2559 		}
2560 		PF_UNLOCK();
2561 		NET_UNLOCK();
2562 		free(table, M_TEMP, sizeof(*table));
2563 		free(ioe, M_TEMP, sizeof(*ioe));
2564 		break;
2565 	}
2566 
2567 	case DIOCXCOMMIT: {
2568 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2569 		struct pfioc_trans_e	*ioe;
2570 		struct pfr_table	*table;
2571 		struct pf_ruleset	*rs;
2572 		int			 i;
2573 
2574 		if (io->esize != sizeof(*ioe)) {
2575 			error = ENODEV;
2576 			goto fail;
2577 		}
2578 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2579 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2580 		NET_LOCK();
2581 		PF_LOCK();
2582 		/* first makes sure everything will succeed */
2583 		for (i = 0; i < io->size; i++) {
2584 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2585 				PF_UNLOCK();
2586 				NET_UNLOCK();
2587 				free(table, M_TEMP, sizeof(*table));
2588 				free(ioe, M_TEMP, sizeof(*ioe));
2589 				error = EFAULT;
2590 				goto fail;
2591 			}
2592 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2593 			    sizeof(ioe->anchor)) {
2594 				PF_UNLOCK();
2595 				NET_UNLOCK();
2596 				free(table, M_TEMP, sizeof(*table));
2597 				free(ioe, M_TEMP, sizeof(*ioe));
2598 				error = ENAMETOOLONG;
2599 				goto fail;
2600 			}
2601 			switch (ioe->type) {
2602 			case PF_TRANS_TABLE:
2603 				rs = pf_find_ruleset(ioe->anchor);
2604 				if (rs == NULL || !rs->topen || ioe->ticket !=
2605 				     rs->tticket) {
2606 					PF_UNLOCK();
2607 					NET_UNLOCK();
2608 					free(table, M_TEMP, sizeof(*table));
2609 					free(ioe, M_TEMP, sizeof(*ioe));
2610 					error = EBUSY;
2611 					goto fail;
2612 				}
2613 				break;
2614 			case PF_TRANS_RULESET:
2615 				rs = pf_find_ruleset(ioe->anchor);
2616 				if (rs == NULL ||
2617 				    !rs->rules.inactive.open ||
2618 				    rs->rules.inactive.ticket !=
2619 				    ioe->ticket) {
2620 					PF_UNLOCK();
2621 					NET_UNLOCK();
2622 					free(table, M_TEMP, sizeof(*table));
2623 					free(ioe, M_TEMP, sizeof(*ioe));
2624 					error = EBUSY;
2625 					goto fail;
2626 				}
2627 				break;
2628 			default:
2629 				PF_UNLOCK();
2630 				NET_UNLOCK();
2631 				free(table, M_TEMP, sizeof(*table));
2632 				free(ioe, M_TEMP, sizeof(*ioe));
2633 				error = EINVAL;
2634 				goto fail;
2635 			}
2636 		}
2637 
2638 		/*
2639 		 * Checked already in DIOCSETLIMIT, but check again as the
2640 		 * situation might have changed.
2641 		 */
2642 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2643 			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2644 			    pf_pool_limits[i].limit_new) {
2645 				PF_UNLOCK();
2646 				NET_UNLOCK();
2647 				free(table, M_TEMP, sizeof(*table));
2648 				free(ioe, M_TEMP, sizeof(*ioe));
2649 				error = EBUSY;
2650 				goto fail;
2651 			}
2652 		}
2653 		/* now do the commit - no errors should happen here */
2654 		for (i = 0; i < io->size; i++) {
2655 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2656 				PF_UNLOCK();
2657 				NET_UNLOCK();
2658 				free(table, M_TEMP, sizeof(*table));
2659 				free(ioe, M_TEMP, sizeof(*ioe));
2660 				error = EFAULT;
2661 				goto fail;
2662 			}
2663 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2664 			    sizeof(ioe->anchor)) {
2665 				PF_UNLOCK();
2666 				NET_UNLOCK();
2667 				free(table, M_TEMP, sizeof(*table));
2668 				free(ioe, M_TEMP, sizeof(*ioe));
2669 				error = ENAMETOOLONG;
2670 				goto fail;
2671 			}
2672 			switch (ioe->type) {
2673 			case PF_TRANS_TABLE:
2674 				memset(table, 0, sizeof(*table));
2675 				strlcpy(table->pfrt_anchor, ioe->anchor,
2676 				    sizeof(table->pfrt_anchor));
2677 				if ((error = pfr_ina_commit(table, ioe->ticket,
2678 				    NULL, NULL, 0))) {
2679 					PF_UNLOCK();
2680 					NET_UNLOCK();
2681 					free(table, M_TEMP, sizeof(*table));
2682 					free(ioe, M_TEMP, sizeof(*ioe));
2683 					goto fail; /* really bad */
2684 				}
2685 				break;
2686 			case PF_TRANS_RULESET:
2687 				if ((error = pf_commit_rules(ioe->ticket,
2688 				    ioe->anchor))) {
2689 					PF_UNLOCK();
2690 					NET_UNLOCK();
2691 					free(table, M_TEMP, sizeof(*table));
2692 					free(ioe, M_TEMP, sizeof(*ioe));
2693 					goto fail; /* really bad */
2694 				}
2695 				break;
2696 			default:
2697 				PF_UNLOCK();
2698 				NET_UNLOCK();
2699 				free(table, M_TEMP, sizeof(*table));
2700 				free(ioe, M_TEMP, sizeof(*ioe));
2701 				error = EINVAL;
2702 				goto fail; /* really bad */
2703 			}
2704 		}
2705 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2706 			if (pf_pool_limits[i].limit_new !=
2707 			    pf_pool_limits[i].limit &&
2708 			    pool_sethardlimit(pf_pool_limits[i].pp,
2709 			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2710 				PF_UNLOCK();
2711 				NET_UNLOCK();
2712 				free(table, M_TEMP, sizeof(*table));
2713 				free(ioe, M_TEMP, sizeof(*ioe));
2714 				error = EBUSY;
2715 				goto fail; /* really bad */
2716 			}
2717 			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2718 		}
2719 		for (i = 0; i < PFTM_MAX; i++) {
2720 			int old = pf_default_rule.timeout[i];
2721 
2722 			pf_default_rule.timeout[i] =
2723 			    pf_default_rule_new.timeout[i];
2724 			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2725 			    pf_default_rule.timeout[i] < old)
2726 				task_add(net_tq(0), &pf_purge_task);
2727 		}
2728 		pfi_xcommit();
2729 		pf_trans_set_commit();
2730 		PF_UNLOCK();
2731 		NET_UNLOCK();
2732 		free(table, M_TEMP, sizeof(*table));
2733 		free(ioe, M_TEMP, sizeof(*ioe));
2734 		break;
2735 	}
2736 
2737 	case DIOCGETSRCNODES: {
2738 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2739 		struct pf_src_node	*n, *p, *pstore;
2740 		u_int32_t		 nr = 0;
2741 		size_t			 space = psn->psn_len;
2742 
2743 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2744 
2745 		NET_LOCK();
2746 		PF_LOCK();
2747 		if (space == 0) {
2748 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2749 				nr++;
2750 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2751 			PF_UNLOCK();
2752 			NET_UNLOCK();
2753 			free(pstore, M_TEMP, sizeof(*pstore));
2754 			break;
2755 		}
2756 
2757 		p = psn->psn_src_nodes;
2758 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2759 			int	secs = getuptime(), diff;
2760 
2761 			if ((nr + 1) * sizeof(*p) > psn->psn_len)
2762 				break;
2763 
2764 			memcpy(pstore, n, sizeof(*pstore));
2765 			memset(&pstore->entry, 0, sizeof(pstore->entry));
2766 			pstore->rule.ptr = NULL;
2767 			pstore->kif = NULL;
2768 			pstore->rule.nr = n->rule.ptr->nr;
2769 			pstore->creation = secs - pstore->creation;
2770 			if (pstore->expire > secs)
2771 				pstore->expire -= secs;
2772 			else
2773 				pstore->expire = 0;
2774 
2775 			/* adjust the connection rate estimate */
2776 			diff = secs - n->conn_rate.last;
2777 			if (diff >= n->conn_rate.seconds)
2778 				pstore->conn_rate.count = 0;
2779 			else
2780 				pstore->conn_rate.count -=
2781 				    n->conn_rate.count * diff /
2782 				    n->conn_rate.seconds;
2783 
2784 			error = copyout(pstore, p, sizeof(*p));
2785 			if (error) {
2786 				PF_UNLOCK();
2787 				NET_UNLOCK();
2788 				free(pstore, M_TEMP, sizeof(*pstore));
2789 				goto fail;
2790 			}
2791 			p++;
2792 			nr++;
2793 		}
2794 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2795 
2796 		PF_UNLOCK();
2797 		NET_UNLOCK();
2798 		free(pstore, M_TEMP, sizeof(*pstore));
2799 		break;
2800 	}
2801 
2802 	case DIOCCLRSRCNODES: {
2803 		struct pf_src_node	*n;
2804 		struct pf_state		*state;
2805 
2806 		NET_LOCK();
2807 		PF_LOCK();
2808 		PF_STATE_ENTER_WRITE();
2809 		RB_FOREACH(state, pf_state_tree_id, &tree_id)
2810 			pf_src_tree_remove_state(state);
2811 		PF_STATE_EXIT_WRITE();
2812 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2813 			n->expire = 1;
2814 		pf_purge_expired_src_nodes();
2815 		PF_UNLOCK();
2816 		NET_UNLOCK();
2817 		break;
2818 	}
2819 
2820 	case DIOCKILLSRCNODES: {
2821 		struct pf_src_node	*sn;
2822 		struct pf_state		*s;
2823 		struct pfioc_src_node_kill *psnk =
2824 		    (struct pfioc_src_node_kill *)addr;
2825 		u_int			killed = 0;
2826 
2827 		NET_LOCK();
2828 		PF_LOCK();
2829 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2830 			if (pf_match_addr(psnk->psnk_src.neg,
2831 				&psnk->psnk_src.addr.v.a.addr,
2832 				&psnk->psnk_src.addr.v.a.mask,
2833 				&sn->addr, sn->af) &&
2834 			    pf_match_addr(psnk->psnk_dst.neg,
2835 				&psnk->psnk_dst.addr.v.a.addr,
2836 				&psnk->psnk_dst.addr.v.a.mask,
2837 				&sn->raddr, sn->af)) {
2838 				/* Handle state to src_node linkage */
2839 				if (sn->states != 0) {
2840 					PF_ASSERT_LOCKED();
2841 					PF_STATE_ENTER_WRITE();
2842 					RB_FOREACH(s, pf_state_tree_id,
2843 					   &tree_id)
2844 						pf_state_rm_src_node(s, sn);
2845 					PF_STATE_EXIT_WRITE();
2846 				}
2847 				sn->expire = 1;
2848 				killed++;
2849 			}
2850 		}
2851 
2852 		if (killed > 0)
2853 			pf_purge_expired_src_nodes();
2854 
2855 		psnk->psnk_killed = killed;
2856 		PF_UNLOCK();
2857 		NET_UNLOCK();
2858 		break;
2859 	}
2860 
2861 	case DIOCSETHOSTID: {
2862 		u_int32_t	*hostid = (u_int32_t *)addr;
2863 
2864 		NET_LOCK();
2865 		PF_LOCK();
2866 		if (*hostid == 0)
2867 			pf_trans_set.hostid = arc4random();
2868 		else
2869 			pf_trans_set.hostid = *hostid;
2870 		pf_trans_set.mask |= PF_TSET_HOSTID;
2871 		PF_UNLOCK();
2872 		NET_UNLOCK();
2873 		break;
2874 	}
2875 
2876 	case DIOCOSFPFLUSH:
2877 		pf_osfp_flush();
2878 		break;
2879 
2880 	case DIOCIGETIFACES: {
2881 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2882 
2883 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2884 			error = ENODEV;
2885 			break;
2886 		}
2887 		NET_LOCK();
2888 		PF_LOCK();
2889 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2890 		    &io->pfiio_size);
2891 		PF_UNLOCK();
2892 		NET_UNLOCK();
2893 		break;
2894 	}
2895 
2896 	case DIOCSETIFFLAG: {
2897 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2898 
2899 		NET_LOCK();
2900 		PF_LOCK();
2901 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2902 		PF_UNLOCK();
2903 		NET_UNLOCK();
2904 		break;
2905 	}
2906 
2907 	case DIOCCLRIFFLAG: {
2908 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2909 
2910 		NET_LOCK();
2911 		PF_LOCK();
2912 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2913 		PF_UNLOCK();
2914 		NET_UNLOCK();
2915 		break;
2916 	}
2917 
2918 	case DIOCSETREASS: {
2919 		u_int32_t	*reass = (u_int32_t *)addr;
2920 
2921 		NET_LOCK();
2922 		PF_LOCK();
2923 		pf_trans_set.reass = *reass;
2924 		pf_trans_set.mask |= PF_TSET_REASS;
2925 		PF_UNLOCK();
2926 		NET_UNLOCK();
2927 		break;
2928 	}
2929 
2930 	case DIOCSETSYNFLWATS: {
2931 		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2932 
2933 		NET_LOCK();
2934 		PF_LOCK();
2935 		error = pf_syncookies_setwats(io->hiwat, io->lowat);
2936 		PF_UNLOCK();
2937 		NET_UNLOCK();
2938 		break;
2939 	}
2940 
2941 	case DIOCGETSYNFLWATS: {
2942 		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2943 
2944 		NET_LOCK();
2945 		PF_LOCK();
2946 		error = pf_syncookies_getwats(io);
2947 		PF_UNLOCK();
2948 		NET_UNLOCK();
2949 		break;
2950 	}
2951 
2952 	case DIOCSETSYNCOOKIES: {
2953 		u_int8_t	*mode = (u_int8_t *)addr;
2954 
2955 		NET_LOCK();
2956 		PF_LOCK();
2957 		error = pf_syncookies_setmode(*mode);
2958 		PF_UNLOCK();
2959 		NET_UNLOCK();
2960 		break;
2961 	}
2962 
2963 	default:
2964 		error = ENODEV;
2965 		break;
2966 	}
2967 fail:
2968 	return (error);
2969 }
2970 
2971 void
2972 pf_trans_set_commit(void)
2973 {
2974 	if (pf_trans_set.mask & PF_TSET_STATUSIF)
2975 		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2976 	if (pf_trans_set.mask & PF_TSET_DEBUG)
2977 		pf_status.debug = pf_trans_set.debug;
2978 	if (pf_trans_set.mask & PF_TSET_HOSTID)
2979 		pf_status.hostid = pf_trans_set.hostid;
2980 	if (pf_trans_set.mask & PF_TSET_REASS)
2981 		pf_status.reass = pf_trans_set.reass;
2982 }
2983 
2984 void
2985 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2986 {
2987 	memmove(to, from, sizeof(*to));
2988 	to->kif = NULL;
2989 	to->addr.p.tbl = NULL;
2990 }
2991 
2992 int
2993 pf_validate_range(u_int8_t op, u_int16_t port[2])
2994 {
2995 	u_int16_t a = ntohs(port[0]);
2996 	u_int16_t b = ntohs(port[1]);
2997 
2998 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
2999 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
3000 	    (op == PF_OP_XRG && a > b))    /* 34<>22, i.e. all */
3001 		return 1;
3002 	return 0;
3003 }
3004 
/*
 * Copy a rule supplied by userland (via a pf ioctl) into a kernel
 * rule, validating it and resolving names into kernel references as
 * we go: interfaces via pf_kif_setup(), queues via pf_qname2qid(),
 * tags via pf_tagname2tag() and the overload table via
 * pfr_attach_table() against 'ruleset'.
 *
 * Returns 0 on success, EINVAL for malformed input, EBUSY when a
 * referenced object (rtable, queue, tag) cannot be resolved.
 * NOTE(review): on an error return 'to' may hold partially attached
 * references (kifs, overload table) — presumably the caller tears the
 * rule down; confirm against the ioctl paths that call this.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	/* copy src/dst, but never trust table pointers from userland */
	to->src = from->src;
	to->src.addr.p.tbl = NULL;
	to->dst = from->dst;
	to->dst.addr.p.tbl = NULL;

	/* reject port ranges that can never (or always) match */
	if (pf_validate_range(to->src.port_op, to->src.port))
		return (EINVAL);
	if (pf_validate_range(to->dst.port_op, to->dst.port))
		return (EINVAL);

	/* XXX union skip[] */

	/* fixed-size name fields; strlcpy guarantees NUL termination */
	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* pools are copied with their kernel pointers cleared */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	if (pf_validate_range(to->rdr.port_op, to->rdr.proxy_port))
		return (EINVAL);

	/* resolve interface names to kif references */
	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);
	/* an overload table name attaches (and activates) the table */
	if (to->overload_tblname[0]) {
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	/* per-pool interfaces (route-to/nat-to/rdr-to targets) */
	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	/* a non-negative rtableid must name an existing routing table */
	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	/* onrdomain is either -1 (unset) or a valid routing domain id */
	to->onrdomain = from->onrdomain;
	if (to->onrdomain != -1 && (to->onrdomain < 0 ||
	    to->onrdomain > RT_TABLEID_MAX))
		return (EINVAL);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
	/* (re)initialize the packet-rate threshold from its parameters */
	pf_init_threshold(&to->pktrate, from->pktrate.limit,
	    from->pktrate.seconds);

	/* queue names must resolve; pqid falls back to qid when unset */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	/* tag names allocate (or reference) kernel tag ids */
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->delay = from->delay;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* without logging enabled the log interface is meaningless */
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert.type = from->divert.type;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}
3145 
3146 int
3147 pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
3148 {
3149 	struct pf_status	pfs;
3150 
3151 	NET_RLOCK_IN_IOCTL();
3152 	PF_LOCK();
3153 	memcpy(&pfs, &pf_status, sizeof(struct pf_status));
3154 	pfi_update_status(pfs.ifname, &pfs);
3155 	PF_UNLOCK();
3156 	NET_RUNLOCK_IN_IOCTL();
3157 
3158 	return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
3159 }
3160