xref: /openbsd-src/sys/net/pf_ioctl.c (revision d2c5a4743fb945f45b034a3a830a96f7e1bc695d)
1 /*	$OpenBSD: pf_ioctl.c,v 1.360 2020/10/22 12:25:20 sashan Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/mbuf.h>
45 #include <sys/filio.h>
46 #include <sys/fcntl.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/kernel.h>
50 #include <sys/time.h>
51 #include <sys/timeout.h>
52 #include <sys/pool.h>
53 #include <sys/malloc.h>
54 #include <sys/proc.h>
55 #include <sys/rwlock.h>
56 #include <sys/syslog.h>
57 #include <uvm/uvm_extern.h>
58 
59 #include <crypto/md5.h>
60 
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/route.h>
64 #include <net/hfsc.h>
65 #include <net/fq_codel.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/ip.h>
69 #include <netinet/in_pcb.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_icmp.h>
72 #include <netinet/tcp.h>
73 #include <netinet/udp.h>
74 
75 #ifdef INET6
76 #include <netinet/ip6.h>
77 #include <netinet/icmp6.h>
78 #endif /* INET6 */
79 
80 #include <net/pfvar.h>
81 #include <net/pfvar_priv.h>
82 
83 #if NPFSYNC > 0
84 #include <netinet/ip_ipsp.h>
85 #include <net/if_pfsync.h>
86 #endif /* NPFSYNC > 0 */
87 
88 struct pool		 pf_tag_pl;
89 
90 void			 pfattach(int);
91 void			 pf_thread_create(void *);
92 int			 pfopen(dev_t, int, int, struct proc *);
93 int			 pfclose(dev_t, int, int, struct proc *);
94 int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
95 int			 pf_begin_rules(u_int32_t *, const char *);
96 int			 pf_rollback_rules(u_int32_t, char *);
97 void			 pf_remove_queues(void);
98 int			 pf_commit_queues(void);
99 void			 pf_free_queues(struct pf_queuehead *);
100 void			 pf_calc_chksum(struct pf_ruleset *);
101 void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
102 void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
103 int			 pf_commit_rules(u_int32_t, char *);
104 int			 pf_addr_setup(struct pf_ruleset *,
105 			    struct pf_addr_wrap *, sa_family_t);
106 int			 pf_kif_setup(char *, struct pfi_kif **);
107 void			 pf_addr_copyout(struct pf_addr_wrap *);
108 void			 pf_trans_set_commit(void);
109 void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
110 int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
111 			    struct pf_ruleset *);
112 u_int16_t		 pf_qname2qid(char *, int);
113 void			 pf_qid2qname(u_int16_t, char *);
114 void			 pf_qid_unref(u_int16_t);
115 
116 struct pf_rule		 pf_default_rule, pf_default_rule_new;
117 
118 struct {
119 	char		statusif[IFNAMSIZ];
120 	u_int32_t	debug;
121 	u_int32_t	hostid;
122 	u_int32_t	reass;
123 	u_int32_t	mask;
124 } pf_trans_set;
125 
126 #define	PF_TSET_STATUSIF	0x01
127 #define	PF_TSET_DEBUG		0x02
128 #define	PF_TSET_HOSTID		0x04
129 #define	PF_TSET_REASS		0x08
130 
131 #define	TAGID_MAX	 50000
132 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
133 				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
134 
135 #ifdef WITH_PF_LOCK
136 /*
137  * pf_lock protects consistency of PF data structures, which don't have
138  * their dedicated lock yet. The pf_lock currently protects:
139  *	- rules,
140  *	- radix tables,
141  *	- source nodes
142  * All callers must grab pf_lock exclusively.
143  *
144  * pf_state_lock protects consistency of state table. Packets, which do state
145  * look up grab the lock as readers. If packet must create state, then it must
146  * grab the lock as writer. Whenever packet creates state it grabs pf_lock
147  * first then it locks pf_state_lock as the writer.
148  */
149 struct rwlock		 pf_lock = RWLOCK_INITIALIZER("pf_lock");
150 struct rwlock		 pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
151 #endif /* WITH_PF_LOCK */
152 
153 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
154 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
155 #endif
156 u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
157 void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
158 void			 tag_unref(struct pf_tags *, u_int16_t);
159 int			 pf_rtlabel_add(struct pf_addr_wrap *);
160 void			 pf_rtlabel_remove(struct pf_addr_wrap *);
161 void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
162 
163 
164 void
165 pfattach(int num)
166 {
167 	u_int32_t *timeout = pf_default_rule.timeout;
168 
169 	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
170 	    IPL_SOFTNET, 0, "pfrule", NULL);
171 	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
172 	    IPL_SOFTNET, 0, "pfsrctr", NULL);
173 	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
174 	    IPL_SOFTNET, 0, "pfsnitem", NULL);
175 	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
176 	    IPL_SOFTNET, 0, "pfstate", NULL);
177 	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
178 	    IPL_SOFTNET, 0, "pfstkey", NULL);
179 	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
180 	    IPL_SOFTNET, 0, "pfstitem", NULL);
181 	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
182 	    IPL_SOFTNET, 0, "pfruleitem", NULL);
183 	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
184 	    IPL_SOFTNET, 0, "pfqueue", NULL);
185 	pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
186 	    IPL_SOFTNET, 0, "pftag", NULL);
187 	pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
188 	    IPL_SOFTNET, 0, "pfpktdelay", NULL);
189 
190 	hfsc_initialize();
191 	pfr_initialize();
192 	pfi_initialize();
193 	pf_osfp_initialize();
194 	pf_syncookies_init();
195 
196 	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
197 	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
198 
199 	if (physmem <= atop(100*1024*1024))
200 		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
201 		    PFR_KENTRY_HIWAT_SMALL;
202 
203 	RB_INIT(&tree_src_tracking);
204 	RB_INIT(&pf_anchors);
205 	pf_init_ruleset(&pf_main_ruleset);
206 	TAILQ_INIT(&pf_queues[0]);
207 	TAILQ_INIT(&pf_queues[1]);
208 	pf_queues_active = &pf_queues[0];
209 	pf_queues_inactive = &pf_queues[1];
210 	TAILQ_INIT(&state_list);
211 
212 	/* default rule should never be garbage collected */
213 	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
214 	pf_default_rule.action = PF_PASS;
215 	pf_default_rule.nr = (u_int32_t)-1;
216 	pf_default_rule.rtableid = -1;
217 
218 	/* initialize default timeouts */
219 	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
220 	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
221 	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
222 	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
223 	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
224 	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
225 	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
226 	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
227 	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
228 	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
229 	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
230 	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
231 	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
232 	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
233 	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
234 	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
235 	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
236 	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
237 	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
238 	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
239 
240 	pf_default_rule.src.addr.type =  PF_ADDR_ADDRMASK;
241 	pf_default_rule.dst.addr.type =  PF_ADDR_ADDRMASK;
242 	pf_default_rule.rdr.addr.type =  PF_ADDR_NONE;
243 	pf_default_rule.nat.addr.type =  PF_ADDR_NONE;
244 	pf_default_rule.route.addr.type =  PF_ADDR_NONE;
245 
246 	pf_normalize_init();
247 	memset(&pf_status, 0, sizeof(pf_status));
248 	pf_status.debug = LOG_ERR;
249 	pf_status.reass = PF_REASS_ENABLED;
250 
251 	/* XXX do our best to avoid a conflict */
252 	pf_status.hostid = arc4random();
253 }
254 
255 int
256 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
257 {
258 	if (minor(dev) >= 1)
259 		return (ENXIO);
260 	return (0);
261 }
262 
263 int
264 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
265 {
266 	if (minor(dev) >= 1)
267 		return (ENXIO);
268 	return (0);
269 }
270 
/*
 * Detach a rule from `rulequeue' (if non-NULL) and release it together
 * with everything it references.  Destruction is deferred while states
 * or source nodes still point at the rule: in that case the rule is
 * only unlinked here and the final teardown happens on a later call
 * (with rulequeue == NULL) once the counters have dropped to zero.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as detached from any queue */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* still referenced by states/source nodes, or still queued: defer */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	if (rulequeue == NULL) {
		/* deferred destruction: tables were not removed above */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_remove_anchor(rule);
	pool_put(&pf_rule_pl, rule);
}
323 
324 void
325 pf_purge_rule(struct pf_rule *rule)
326 {
327 	u_int32_t		 nr = 0;
328 	struct pf_ruleset	*ruleset;
329 
330 	KASSERT((rule != NULL) && (rule->ruleset != NULL));
331 	ruleset = rule->ruleset;
332 
333 	pf_rm_rule(ruleset->rules.active.ptr, rule);
334 	ruleset->rules.active.rcount--;
335 	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
336 		rule->nr = nr++;
337 	ruleset->rules.active.ticket++;
338 	pf_calc_skip_steps(ruleset->rules.active.ptr);
339 	pf_remove_if_empty_ruleset(ruleset);
340 
341 	if (ruleset == &pf_main_ruleset)
342 		pf_calc_chksum(ruleset);
343 }
344 
/*
 * Look up (and optionally create) the numeric tag for `tagname' on the
 * given tag list, taking a reference.  Returns 0 on failure (unknown
 * name with create == 0, tag space exhausted, or allocation failure);
 * valid tags are in the range [1, TAGID_MAX].
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	TAILQ_FOREACH(p, head, entries) {
		/*
		 * the list is kept sorted by tag id, so the first entry
		 * whose id differs from the running counter marks a gap;
		 * insert the new tag just before it.
		 */
		if (p->tag != new_tagid)
			break;
		new_tagid = p->tag + 1;
	}

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
391 
392 void
393 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
394 {
395 	struct pf_tagname	*tag;
396 
397 	TAILQ_FOREACH(tag, head, entries)
398 		if (tag->tag == tagid) {
399 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
400 			return;
401 		}
402 }
403 
404 void
405 tag_unref(struct pf_tags *head, u_int16_t tag)
406 {
407 	struct pf_tagname	*p, *next;
408 
409 	if (tag == 0)
410 		return;
411 
412 	TAILQ_FOREACH_SAFE(p, head, entries, next) {
413 		if (tag == p->tag) {
414 			if (--p->ref == 0) {
415 				TAILQ_REMOVE(head, p, entries);
416 				pool_put(&pf_tag_pl, p);
417 			}
418 			break;
419 		}
420 	}
421 }
422 
/* Resolve (and optionally create) a packet tag id on the pf_tags list. */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}
428 
/* Copy the name of packet tag `tagid' into `p' (PF_TAG_NAME_SIZE bytes). */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
434 
435 void
436 pf_tag_ref(u_int16_t tag)
437 {
438 	struct pf_tagname *t;
439 
440 	TAILQ_FOREACH(t, &pf_tags, entries)
441 		if (t->tag == tag)
442 			break;
443 	if (t != NULL)
444 		t->ref++;
445 }
446 
/* Drop a reference on packet tag `tag' (frees the mapping at refcount 0). */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
452 
453 int
454 pf_rtlabel_add(struct pf_addr_wrap *a)
455 {
456 	if (a->type == PF_ADDR_RTLABEL &&
457 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
458 		return (-1);
459 	return (0);
460 }
461 
462 void
463 pf_rtlabel_remove(struct pf_addr_wrap *a)
464 {
465 	if (a->type == PF_ADDR_RTLABEL)
466 		rtlabel_unref(a->v.rtlabel);
467 }
468 
469 void
470 pf_rtlabel_copyout(struct pf_addr_wrap *a)
471 {
472 	const char	*name;
473 
474 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
475 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
476 			strlcpy(a->v.rtlabelname, "?",
477 			    sizeof(a->v.rtlabelname));
478 		else
479 			strlcpy(a->v.rtlabelname, name,
480 			    sizeof(a->v.rtlabelname));
481 	}
482 }
483 
/* Resolve (and optionally create) a queue id; qids share the tag machinery. */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}
489 
/* Copy the name of queue `qid' into `p' (PF_QNAME_SIZE == PF_TAG_NAME_SIZE). */
void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}
495 
496 void
497 pf_qid_unref(u_int16_t qid)
498 {
499 	tag_unref(&pf_qids, (u_int16_t)qid);
500 }
501 
502 int
503 pf_begin_rules(u_int32_t *ticket, const char *anchor)
504 {
505 	struct pf_ruleset	*rs;
506 	struct pf_rule		*rule;
507 
508 	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
509 		return (EINVAL);
510 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
511 		pf_rm_rule(rs->rules.inactive.ptr, rule);
512 		rs->rules.inactive.rcount--;
513 	}
514 	*ticket = ++rs->rules.inactive.ticket;
515 	rs->rules.inactive.open = 1;
516 	return (0);
517 }
518 
519 int
520 pf_rollback_rules(u_int32_t ticket, char *anchor)
521 {
522 	struct pf_ruleset	*rs;
523 	struct pf_rule		*rule;
524 
525 	rs = pf_find_ruleset(anchor);
526 	if (rs == NULL || !rs->rules.inactive.open ||
527 	    rs->rules.inactive.ticket != ticket)
528 		return (0);
529 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
530 		pf_rm_rule(rs->rules.inactive.ptr, rule);
531 		rs->rules.inactive.rcount--;
532 	}
533 	rs->rules.inactive.open = 0;
534 
535 	/* queue defs only in the main ruleset */
536 	if (anchor[0])
537 		return (0);
538 
539 	pf_free_queues(pf_queues_inactive);
540 
541 	return (0);
542 }
543 
544 void
545 pf_free_queues(struct pf_queuehead *where)
546 {
547 	struct pf_queuespec	*q, *qtmp;
548 
549 	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
550 		TAILQ_REMOVE(where, q, entries);
551 		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
552 		pool_put(&pf_queue_pl, q);
553 	}
554 }
555 
556 void
557 pf_remove_queues(void)
558 {
559 	struct pf_queuespec	*q;
560 	struct ifnet		*ifp;
561 
562 	/* put back interfaces in normal queueing mode */
563 	TAILQ_FOREACH(q, pf_queues_active, entries) {
564 		if (q->parent_qid != 0)
565 			continue;
566 
567 		ifp = q->kif->pfik_ifp;
568 		if (ifp == NULL)
569 			continue;
570 
571 		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
572 	}
573 }
574 
/*
 * Transient per-interface bookkeeping used by pf_create_queues() while
 * building a new queue configuration; nodes form a singly linked list
 * that lives only for the duration of the commit.
 */
struct pf_queue_if {
	struct ifnet		*ifp;		/* interface with a root queue */
	const struct ifq_ops	*ifqops;	/* ifqueue ops to attach */
	const struct pfq_ops	*pfqops;	/* pf queue discipline ops */
	void			*disc;		/* discipline private data */
	struct pf_queue_if	*next;		/* next node in the list */
};
582 
583 static inline struct pf_queue_if *
584 pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
585 {
586 	struct pf_queue_if *qif = list;
587 
588 	while (qif != NULL) {
589 		if (qif->ifp == ifp)
590 			return (qif);
591 
592 		qif = qif->next;
593 	}
594 
595 	return (qif);
596 }
597 
/*
 * Build and attach the queue configuration described by the (already
 * swapped-in) active queue list.  Works in phases: allocate per-root-
 * interface discipline state, add every queue to its discipline, reset
 * interfaces that lost their root queue, then attach the new
 * disciplines.  On error nothing has been attached yet, so the
 * half-built state is simply freed again.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_queue_if		*list = NULL, *qif;
	int			 error;

	/*
	 * Find root queues and allocate traffic conditioner
	 * private data for these interfaces
	 */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
		qif->ifp = ifp;

		/* root class present -> HFSC; otherwise plain fq-codel */
		if (q->flags & PFQS_ROOTCLASS) {
			qif->ifqops = ifq_hfsc_ops;
			qif->pfqops = pfq_hfsc_ops;
		} else {
			qif->ifqops = ifq_fqcodel_ops;
			qif->pfqops = pfq_fqcodel_ops;
		}

		qif->disc = qif->pfqops->pfq_alloc(ifp);

		qif->next = list;
		list = qif;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* phase 1 created a node for every root interface */
		qif = pf_ifp2q(list, ifp);
		KASSERT(qif != NULL);

		error = qif->pfqops->pfq_addqueue(qif->disc, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* interface still has a root queue in the new config */
		qif = pf_ifp2q(list, ifp);
		if (qif != NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		ifp = qif->ifp;

		/* ifq_attach takes over ownership of qif->disc */
		ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (0);

error:
	/* nothing was attached yet; free all discipline state again */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		qif->pfqops->pfq_free(qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (error);
}
689 
690 int
691 pf_commit_queues(void)
692 {
693 	struct pf_queuehead	*qswap;
694 	int error;
695 
696         /* swap */
697         qswap = pf_queues_active;
698         pf_queues_active = pf_queues_inactive;
699         pf_queues_inactive = qswap;
700 
701 	error = pf_create_queues();
702 	if (error != 0) {
703 		pf_queues_inactive = pf_queues_active;
704 		pf_queues_active = qswap;
705 		return (error);
706 	}
707 
708         pf_free_queues(pf_queues_inactive);
709 
710 	return (0);
711 }
712 
713 const struct pfq_ops *
714 pf_queue_manager(struct pf_queuespec *q)
715 {
716 	if (q->flags & PFQS_FLOWQUEUE)
717 		return pfq_fqcodel_ops;
718 	return (/* pfq_default_ops */ NULL);
719 }
720 
/*
 * Helpers for folding rule fields into the ruleset MD5 checksum.
 * PF_MD5_UPD hashes a fixed-size member, PF_MD5_UPD_STR a NUL-
 * terminated string (without the terminator), and the HTONL/HTONS
 * variants hash the value in network byte order so the resulting
 * checksum is independent of host endianness.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
736 
737 void
738 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
739 {
740 	PF_MD5_UPD(pfr, addr.type);
741 	switch (pfr->addr.type) {
742 		case PF_ADDR_DYNIFTL:
743 			PF_MD5_UPD(pfr, addr.v.ifname);
744 			PF_MD5_UPD(pfr, addr.iflags);
745 			break;
746 		case PF_ADDR_TABLE:
747 			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
748 			    strlen(PF_OPTIMIZER_TABLE_PFX)))
749 				PF_MD5_UPD(pfr, addr.v.tblname);
750 			break;
751 		case PF_ADDR_ADDRMASK:
752 			/* XXX ignore af? */
753 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
754 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
755 			break;
756 		case PF_ADDR_RTLABEL:
757 			PF_MD5_UPD(pfr, addr.v.rtlabelname);
758 			break;
759 	}
760 
761 	PF_MD5_UPD(pfr, port[0]);
762 	PF_MD5_UPD(pfr, port[1]);
763 	PF_MD5_UPD(pfr, neg);
764 	PF_MD5_UPD(pfr, port_op);
765 }
766 
/*
 * Fold a rule's identity-defining fields into the MD5 context used for
 * pf_status.pf_chksum.  NOTE: the sequence of updates below defines the
 * checksum value -- do not reorder, add or remove fields without
 * understanding that the resulting checksum changes.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;	/* scratch for 16-bit network-order values */
	u_int32_t y;	/* scratch for 32-bit network-order values */

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
806 
/*
 * Commit a rules transaction: swap the inactive rule set into place,
 * recompute derived state and destroy the previously active rules.
 * Returns EBUSY when the ticket does not match the open transaction.
 * For the main ruleset this also commits the queue definitions.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	u_int32_t		 old_rcount;

	/* Make sure any expired rules get removed from active rules first. */
	pf_purge_expired_rules();

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* checksum covers the main ruleset only */
	if (rs == &pf_main_ruleset)
		pf_calc_chksum(rs);

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}
851 
852 void
853 pf_calc_chksum(struct pf_ruleset *rs)
854 {
855 	MD5_CTX			 ctx;
856 	struct pf_rule		*rule;
857 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
858 
859 	MD5Init(&ctx);
860 
861 	if (rs->rules.inactive.rcount) {
862 		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
863 			pf_hash_rule(&ctx, rule);
864 		}
865 	}
866 
867 	MD5Final(digest, &ctx);
868 	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
869 }
870 
871 int
872 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
873     sa_family_t af)
874 {
875 	if (pfi_dynaddr_setup(addr, af) ||
876 	    pf_tbladdr_setup(ruleset, addr) ||
877 	    pf_rtlabel_add(addr))
878 		return (EINVAL);
879 
880 	return (0);
881 }
882 
883 int
884 pf_kif_setup(char *ifname, struct pfi_kif **kif)
885 {
886 	if (ifname[0]) {
887 		*kif = pfi_kif_get(ifname);
888 		if (*kif == NULL)
889 			return (EINVAL);
890 
891 		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
892 	} else
893 		*kif = NULL;
894 
895 	return (0);
896 }
897 
/*
 * Prepare an address wrapper for copyout to userland: translate the
 * kernel-side dynamic address, table and route label state back into
 * their user-visible representations.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
905 
906 int
907 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
908 {
909 	int			 error = 0;
910 
911 	/* XXX keep in sync with switch() below */
912 	if (securelevel > 1)
913 		switch (cmd) {
914 		case DIOCGETRULES:
915 		case DIOCGETRULE:
916 		case DIOCGETSTATE:
917 		case DIOCSETSTATUSIF:
918 		case DIOCGETSTATUS:
919 		case DIOCCLRSTATUS:
920 		case DIOCNATLOOK:
921 		case DIOCSETDEBUG:
922 		case DIOCGETSTATES:
923 		case DIOCGETTIMEOUT:
924 		case DIOCGETLIMIT:
925 		case DIOCGETRULESETS:
926 		case DIOCGETRULESET:
927 		case DIOCGETQUEUES:
928 		case DIOCGETQUEUE:
929 		case DIOCGETQSTATS:
930 		case DIOCRGETTABLES:
931 		case DIOCRGETTSTATS:
932 		case DIOCRCLRTSTATS:
933 		case DIOCRCLRADDRS:
934 		case DIOCRADDADDRS:
935 		case DIOCRDELADDRS:
936 		case DIOCRSETADDRS:
937 		case DIOCRGETADDRS:
938 		case DIOCRGETASTATS:
939 		case DIOCRCLRASTATS:
940 		case DIOCRTSTADDRS:
941 		case DIOCOSFPGET:
942 		case DIOCGETSRCNODES:
943 		case DIOCCLRSRCNODES:
944 		case DIOCIGETIFACES:
945 		case DIOCSETIFFLAG:
946 		case DIOCCLRIFFLAG:
947 		case DIOCGETSYNFLWATS:
948 			break;
949 		case DIOCRCLRTABLES:
950 		case DIOCRADDTABLES:
951 		case DIOCRDELTABLES:
952 		case DIOCRSETTFLAGS:
953 			if (((struct pfioc_table *)addr)->pfrio_flags &
954 			    PFR_FLAG_DUMMY)
955 				break; /* dummy operation ok */
956 			return (EPERM);
957 		default:
958 			return (EPERM);
959 		}
960 
961 	if (!(flags & FWRITE))
962 		switch (cmd) {
963 		case DIOCGETRULES:
964 		case DIOCGETSTATE:
965 		case DIOCGETSTATUS:
966 		case DIOCGETSTATES:
967 		case DIOCGETTIMEOUT:
968 		case DIOCGETLIMIT:
969 		case DIOCGETRULESETS:
970 		case DIOCGETRULESET:
971 		case DIOCGETQUEUES:
972 		case DIOCGETQUEUE:
973 		case DIOCGETQSTATS:
974 		case DIOCNATLOOK:
975 		case DIOCRGETTABLES:
976 		case DIOCRGETTSTATS:
977 		case DIOCRGETADDRS:
978 		case DIOCRGETASTATS:
979 		case DIOCRTSTADDRS:
980 		case DIOCOSFPGET:
981 		case DIOCGETSRCNODES:
982 		case DIOCIGETIFACES:
983 		case DIOCGETSYNFLWATS:
984 			break;
985 		case DIOCRCLRTABLES:
986 		case DIOCRADDTABLES:
987 		case DIOCRDELTABLES:
988 		case DIOCRCLRTSTATS:
989 		case DIOCRCLRADDRS:
990 		case DIOCRADDADDRS:
991 		case DIOCRDELADDRS:
992 		case DIOCRSETADDRS:
993 		case DIOCRSETTFLAGS:
994 			if (((struct pfioc_table *)addr)->pfrio_flags &
995 			    PFR_FLAG_DUMMY) {
996 				flags |= FWRITE; /* need write lock for dummy */
997 				break; /* dummy operation ok */
998 			}
999 			return (EACCES);
1000 		case DIOCGETRULE:
1001 			if (((struct pfioc_rule *)addr)->action ==
1002 			    PF_GET_CLR_CNTR)
1003 				return (EACCES);
1004 			break;
1005 		default:
1006 			return (EACCES);
1007 		}
1008 
1009 	switch (cmd) {
1010 
1011 	case DIOCSTART:
1012 		NET_LOCK();
1013 		PF_LOCK();
1014 		if (pf_status.running)
1015 			error = EEXIST;
1016 		else {
1017 			pf_status.running = 1;
1018 			pf_status.since = getuptime();
1019 			if (pf_status.stateid == 0) {
1020 				pf_status.stateid = gettime();
1021 				pf_status.stateid = pf_status.stateid << 32;
1022 			}
1023 			timeout_add_sec(&pf_purge_to, 1);
1024 			pf_create_queues();
1025 			DPFPRINTF(LOG_NOTICE, "pf: started");
1026 		}
1027 		PF_UNLOCK();
1028 		NET_UNLOCK();
1029 		break;
1030 
1031 	case DIOCSTOP:
1032 		NET_LOCK();
1033 		PF_LOCK();
1034 		if (!pf_status.running)
1035 			error = ENOENT;
1036 		else {
1037 			pf_status.running = 0;
1038 			pf_status.since = getuptime();
1039 			pf_remove_queues();
1040 			DPFPRINTF(LOG_NOTICE, "pf: stopped");
1041 		}
1042 		PF_UNLOCK();
1043 		NET_UNLOCK();
1044 		break;
1045 
1046 	case DIOCGETQUEUES: {
1047 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1048 		struct pf_queuespec	*qs;
1049 		u_int32_t		 nr = 0;
1050 
1051 		NET_LOCK();
1052 		PF_LOCK();
1053 		pq->ticket = pf_main_ruleset.rules.active.ticket;
1054 
1055 		/* save state to not run over them all each time? */
1056 		qs = TAILQ_FIRST(pf_queues_active);
1057 		while (qs != NULL) {
1058 			qs = TAILQ_NEXT(qs, entries);
1059 			nr++;
1060 		}
1061 		pq->nr = nr;
1062 		PF_UNLOCK();
1063 		NET_UNLOCK();
1064 		break;
1065 	}
1066 
1067 	case DIOCGETQUEUE: {
1068 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1069 		struct pf_queuespec	*qs;
1070 		u_int32_t		 nr = 0;
1071 
1072 		NET_LOCK();
1073 		PF_LOCK();
1074 		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1075 			error = EBUSY;
1076 			PF_UNLOCK();
1077 			NET_UNLOCK();
1078 			break;
1079 		}
1080 
1081 		/* save state to not run over them all each time? */
1082 		qs = TAILQ_FIRST(pf_queues_active);
1083 		while ((qs != NULL) && (nr++ < pq->nr))
1084 			qs = TAILQ_NEXT(qs, entries);
1085 		if (qs == NULL) {
1086 			error = EBUSY;
1087 			PF_UNLOCK();
1088 			NET_UNLOCK();
1089 			break;
1090 		}
1091 		memcpy(&pq->queue, qs, sizeof(pq->queue));
1092 		PF_UNLOCK();
1093 		NET_UNLOCK();
1094 		break;
1095 	}
1096 
	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_queuespec	*qs;
		u_int32_t		 nr;
		int			 nbytes;

		NET_LOCK();
		PF_LOCK();
		/* Ticket must match the active ruleset generation. */
		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;

		/* save state to not run over them all each time? */
		/* Linear walk to the pq->nr'th active queue spec. */
		qs = TAILQ_FIRST(pf_queues_active);
		while ((qs != NULL) && (nr++ < pq->nr))
			qs = TAILQ_NEXT(qs, entries);
		if (qs == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pq->queue, qs, sizeof(pq->queue));
		/* It's a root flow queue but is not an HFSC root class */
		if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 &&
		    !(qs->flags & PFQS_ROOTCLASS))
			error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		else
			error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf,
			    &nbytes);
		/* Report how many stats bytes were actually produced. */
		if (error == 0)
			pq->nbytes = nbytes;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1139 
	case DIOCADDQUEUE: {
		struct pfioc_queue	*q = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;

		/* Allocate before taking the locks; pool_get may sleep. */
		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (qs == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		/* Queue is added to the inactive (in-progress) ruleset. */
		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		memcpy(qs, &q->queue, sizeof(*qs));
		/* Second arg 1: create the qid mapping if it doesn't exist. */
		qs->qid = pf_qname2qid(qs->qname, 1);
		if (qs->qid == 0) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		/* A named parent must already have a qid (no creation here). */
		if (qs->parent[0] && (qs->parent_qid =
		    pf_qname2qid(qs->parent, 0)) == 0) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		qs->kif = pfi_kif_get(qs->ifname);
		if (qs->kif == NULL) {
			error = ESRCH;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_queue_pl, qs);
			break;
		}
		/* XXX resolve bw percentage specs */
		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);

		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
		PF_UNLOCK();
		NET_UNLOCK();

		break;
	}
1193 
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;

		/* Allocate before taking the locks; pool_get may sleep. */
		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		PF_LOCK();
		/* Force NUL-termination of the user-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		/* High byte of return_icmp is the ICMP type; bound it. */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		/* Rule is added to the inactive (in-progress) ruleset. */
		if (pr->ticket != ruleset->rules.inactive.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			pool_put(&pf_rule_pl, rule);
			break;
		}
		/*
		 * After pf_rule_copyin() the rule may hold references
		 * (kif, tables); tear it down with pf_rm_rule(), not
		 * a bare pool_put().
		 */
		if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
			pf_rm_rule(NULL, rule);
			rule = NULL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		rule->cuid = p->p_ucred->cr_ruid;
		rule->cpid = p->p_p->ps_pid;

		/* Only AF_UNSPEC (0), AF_INET and (optionally) AF_INET6. */
		switch (rule->af) {
		case 0:
			break;
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			pf_rm_rule(NULL, rule);
			rule = NULL;
			error = EAFNOSUPPORT;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* New rule gets the next rule number in the inactive list. */
		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

		/* Validation: errors accumulate, checked once below. */
		if (rule->src.addr.type == PF_ADDR_NONE ||
		    rule->dst.addr.type == PF_ADDR_NONE)
			error = EINVAL;

		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		/* route-to requires an explicit direction. */
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > IFQ_MAXPRIO ||
		    rule->set_prio[1] > IFQ_MAXPRIO))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
		    rule, entries);
		rule->ruleset = ruleset;
		ruleset->rules.inactive.rcount++;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1301 
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;

		NET_LOCK();
		PF_LOCK();
		/* Force NUL-termination of the user-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Rule count = last rule's number + 1 (numbers are dense). */
		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		/* Hand out the ticket for subsequent DIOCGETRULE calls. */
		pr->ticket = ruleset->rules.active.ticket;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1327 
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 i;

		NET_LOCK();
		PF_LOCK();
		/* Force NUL-termination of the user-supplied anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Ticket from DIOCGETRULES must still be current. */
		if (pr->ticket != ruleset->rules.active.ticket) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Linear walk to the rule numbered pr->nr. */
		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		memcpy(&pr->rule, rule, sizeof(struct pf_rule));
		/* Scrub kernel pointers before copying out to userland. */
		memset(&pr->rule.entries, 0, sizeof(pr->rule.entries));
		pr->rule.kif = NULL;
		pr->rule.nat.kif = NULL;
		pr->rule.rdr.kif = NULL;
		pr->rule.route.kif = NULL;
		pr->rule.rcv_kif = NULL;
		pr->rule.anchor = NULL;
		pr->rule.overload_tbl = NULL;
		/* Convert internal fixed-point rate limit back to user units. */
		pr->rule.pktrate.limit /= PF_THRESHOLD_MULT;
		memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle));
		pr->rule.ruleset = NULL;
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		pf_addr_copyout(&pr->rule.rdr.addr);
		pf_addr_copyout(&pr->rule.nat.addr);
		pf_addr_copyout(&pr->rule.route.addr);
		/* Translate skip-step pointers into rule numbers (-1 = none). */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = (u_int32_t)-1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		/* Optionally reset the rule's counters after reading. */
		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			rule->states_tot = 0;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1399 
1400 	case DIOCCHANGERULE: {
1401 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1402 		struct pf_ruleset	*ruleset;
1403 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1404 		u_int32_t		 nr = 0;
1405 
1406 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1407 		    pcr->action > PF_CHANGE_GET_TICKET) {
1408 			error = EINVAL;
1409 			break;
1410 		}
1411 
1412 		newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1413 		if (newrule == NULL) {
1414 			error = ENOMEM;
1415 			break;
1416 		}
1417 
1418 		NET_LOCK();
1419 		PF_LOCK();
1420 		ruleset = pf_find_ruleset(pcr->anchor);
1421 		if (ruleset == NULL) {
1422 			error = EINVAL;
1423 			PF_UNLOCK();
1424 			NET_UNLOCK();
1425 			pool_put(&pf_rule_pl, newrule);
1426 			break;
1427 		}
1428 
1429 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1430 			pcr->ticket = ++ruleset->rules.active.ticket;
1431 			PF_UNLOCK();
1432 			NET_UNLOCK();
1433 			pool_put(&pf_rule_pl, newrule);
1434 			break;
1435 		} else {
1436 			if (pcr->ticket !=
1437 			    ruleset->rules.active.ticket) {
1438 				error = EINVAL;
1439 				PF_UNLOCK();
1440 				NET_UNLOCK();
1441 				pool_put(&pf_rule_pl, newrule);
1442 				break;
1443 			}
1444 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1445 				error = EINVAL;
1446 				PF_UNLOCK();
1447 				NET_UNLOCK();
1448 				pool_put(&pf_rule_pl, newrule);
1449 				break;
1450 			}
1451 		}
1452 
1453 		if (pcr->action != PF_CHANGE_REMOVE) {
1454 			pf_rule_copyin(&pcr->rule, newrule, ruleset);
1455 			newrule->cuid = p->p_ucred->cr_ruid;
1456 			newrule->cpid = p->p_p->ps_pid;
1457 
1458 			switch (newrule->af) {
1459 			case 0:
1460 				break;
1461 			case AF_INET:
1462 				break;
1463 #ifdef INET6
1464 			case AF_INET6:
1465 				break;
1466 #endif /* INET6 */
1467 			default:
1468 				pf_rm_rule(NULL, newrule);
1469 				error = EAFNOSUPPORT;
1470 				PF_UNLOCK();
1471 				NET_UNLOCK();
1472 				goto fail;
1473 			}
1474 
1475 			if (newrule->rt && !newrule->direction)
1476 				error = EINVAL;
1477 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1478 				error = EINVAL;
1479 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1480 				error = EINVAL;
1481 			if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
1482 				error = EINVAL;
1483 			if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
1484 				error = EINVAL;
1485 			if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
1486 				error = EINVAL;
1487 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1488 				error = EINVAL;
1489 
1490 			if (error) {
1491 				pf_rm_rule(NULL, newrule);
1492 				PF_UNLOCK();
1493 				NET_UNLOCK();
1494 				break;
1495 			}
1496 		}
1497 
1498 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1499 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1500 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1501 			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1502 			    pf_rulequeue);
1503 		else {
1504 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1505 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1506 				oldrule = TAILQ_NEXT(oldrule, entries);
1507 			if (oldrule == NULL) {
1508 				if (newrule != NULL)
1509 					pf_rm_rule(NULL, newrule);
1510 				error = EINVAL;
1511 				PF_UNLOCK();
1512 				NET_UNLOCK();
1513 				break;
1514 			}
1515 		}
1516 
1517 		if (pcr->action == PF_CHANGE_REMOVE) {
1518 			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1519 			ruleset->rules.active.rcount--;
1520 		} else {
1521 			if (oldrule == NULL)
1522 				TAILQ_INSERT_TAIL(
1523 				    ruleset->rules.active.ptr,
1524 				    newrule, entries);
1525 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1526 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1527 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1528 			else
1529 				TAILQ_INSERT_AFTER(
1530 				    ruleset->rules.active.ptr,
1531 				    oldrule, newrule, entries);
1532 			ruleset->rules.active.rcount++;
1533 		}
1534 
1535 		nr = 0;
1536 		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1537 			oldrule->nr = nr++;
1538 
1539 		ruleset->rules.active.ticket++;
1540 
1541 		pf_calc_skip_steps(ruleset->rules.active.ptr);
1542 		pf_remove_if_empty_ruleset(ruleset);
1543 
1544 		PF_UNLOCK();
1545 		NET_UNLOCK();
1546 		break;
1547 	}
1548 
	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 killed = 0;

		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		/* Remove all states, or only those on psk_ifname if set. */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			/* Fetch the successor before removal invalidates s. */
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC > 0
				/* don't send out individual delete messages */
				SET(s->state_flags, PFSTATE_NOSYNC);
#endif	/* NPFSYNC > 0 */
				pf_remove_state(s);
				killed++;
			}
		}
		PF_STATE_EXIT_WRITE();
		psk->psk_killed = killed;
#if NPFSYNC > 0
		/* One bulk clear message instead of per-state deletes. */
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif	/* NPFSYNC > 0 */
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1579 
	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_item	*si, *sit;
		struct pf_state_key	*sk, key;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;
		const int 		 dirs[] = { PF_IN, PF_OUT };
		int			 sidx, didx;

		/* Path 1: kill one state by its (id, creatorid) pair. */
		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = pf_status.hostid;
			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
				pf_remove_state(s);
				psk->psk_killed = 1;
			}
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/*
		 * Path 2: fully-specified key (af, proto, exact src and
		 * dst ports) - look up the state key directly in both
		 * directions instead of scanning every state.
		 */
		if (psk->psk_af && psk->psk_proto &&
		    psk->psk_src.port_op == PF_OP_EQ &&
		    psk->psk_dst.port_op == PF_OP_EQ) {

			key.af = psk->psk_af;
			key.proto = psk->psk_proto;
			key.rdomain = psk->psk_rdomain;

			NET_LOCK();
			PF_LOCK();
			PF_STATE_ENTER_WRITE();
			for (i = 0; i < nitems(dirs); i++) {
				/* Swap address slots per lookup direction. */
				if (dirs[i] == PF_IN) {
					sidx = 0;
					didx = 1;
				} else {
					sidx = 1;
					didx = 0;
				}
				pf_addrcpy(&key.addr[sidx],
				    &psk->psk_src.addr.v.a.addr, key.af);
				pf_addrcpy(&key.addr[didx],
				    &psk->psk_dst.addr.v.a.addr, key.af);
				key.port[sidx] = psk->psk_src.port[0];
				key.port[didx] = psk->psk_dst.port[0];

				sk = RB_FIND(pf_state_tree, &pf_statetbl, &key);
				if (sk == NULL)
					continue;

				/*
				 * Kill each state on this key whose wire/
				 * stack key matches the lookup direction
				 * (af-translated states match either key
				 * on the inbound pass) and whose interface
				 * matches psk_ifname, if one was given.
				 */
				TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit)
					if (((si->s->key[PF_SK_WIRE]->af ==
					    si->s->key[PF_SK_STACK]->af &&
					    sk == (dirs[i] == PF_IN ?
					    si->s->key[PF_SK_WIRE] :
					    si->s->key[PF_SK_STACK])) ||
					    (si->s->key[PF_SK_WIRE]->af !=
					    si->s->key[PF_SK_STACK]->af &&
					    dirs[i] == PF_IN &&
					    (sk == si->s->key[PF_SK_STACK] ||
					    sk == si->s->key[PF_SK_WIRE]))) &&
					    (!psk->psk_ifname[0] ||
					    (si->s->kif != pfi_all &&
					    !strcmp(psk->psk_ifname,
					    si->s->kif->pfik_name)))) {
						pf_remove_state(si->s);
						killed++;
					}
			}
			if (killed)
				psk->psk_killed = killed;
			PF_STATE_EXIT_WRITE();
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* Path 3: general filter - scan every state. */
		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			/* Fetch successor before removal invalidates s. */
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			/* Pick the key that holds src/dst in caller order. */
			if (s->direction == PF_OUT) {
				sk = s->key[PF_SK_STACK];
				srcaddr = &sk->addr[1];
				dstaddr = &sk->addr[0];
				srcport = sk->port[1];
				dstport = sk->port[0];
			} else {
				sk = s->key[PF_SK_WIRE];
				srcaddr = &sk->addr[0];
				dstaddr = &sk->addr[1];
				srcport = sk->port[0];
				dstport = sk->port[1];
			}
			/* Unset criteria (0/empty) act as wildcards. */
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
			    pf_match_addr(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    srcaddr, sk->af) &&
			    pf_match_addr(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    dstaddr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    srcport)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dstport)) &&
			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
				pf_remove_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
		PF_STATE_EXIT_WRITE();
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1717 
#if NPFSYNC > 0
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		/* Reject invalid timeout indices before importing. */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		PF_STATE_ENTER_WRITE();
		/* Reuse the pfsync import path for ioctl-injected states. */
		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
		PF_STATE_EXIT_WRITE();
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
#endif	/* NPFSYNC > 0 */
1737 
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		memset(&id_key, 0, sizeof(id_key));
		id_key.id = ps->state.id;
		id_key.creatorid = ps->state.creatorid;

		NET_LOCK();
		PF_STATE_ENTER_READ();
		s = pf_find_state_byid(&id_key);
		/* Take a reference so the state survives dropping the lock. */
		s = pf_state_ref(s);
		PF_STATE_EXIT_READ();
		NET_UNLOCK();
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		/* Export outside the state lock; the ref keeps s valid. */
		pf_state_export(&ps->state, s);
		pf_state_unref(s);
		break;
	}
1762 
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		/* Size query: report required buffer length only. */
		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		/* Bounce buffer: copyout may sleep, so export into this
		 * kernel buffer first rather than from the state itself. */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		NET_LOCK();
		PF_STATE_ENTER_READ();
		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* Skip states already unlinked but not yet freed. */
			if (state->timeout != PFTM_UNLINKED) {
				/* Stop when the user buffer is full. */
				if ((nr+1) * sizeof(*p) > ps->ps_len)
					break;
				pf_state_export(pstore, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP, sizeof(*pstore));
					PF_STATE_EXIT_READ();
					NET_UNLOCK();
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}
		PF_STATE_EXIT_READ();
		NET_UNLOCK();

		/* Report the number of bytes actually written. */
		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP, sizeof(*pstore));
		break;
	}
1807 
	case DIOCGETSTATUS: {
		/* Snapshot global pf status plus per-interface counters. */
		struct pf_status *s = (struct pf_status *)addr;
		NET_LOCK();
		PF_LOCK();
		memcpy(s, &pf_status, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		/* Empty name clears the status interface immediately. */
		if (pi->pfiio_name[0] == 0) {
			memset(pf_status.ifname, 0, IFNAMSIZ);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		/* Non-empty name is staged; applied at commit time via
		 * the transaction set. */
		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
		pf_trans_set.mask |= PF_TSET_STATUSIF;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCCLRSTATUS: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		NET_LOCK();
		PF_LOCK();
		/* if ifname is specified, clear counters there only */
		if (pi->pfiio_name[0]) {
			pfi_update_status(pi->pfiio_name, NULL);
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}

		/* Otherwise reset the global counters and uptime mark. */
		memset(pf_status.counters, 0, sizeof(pf_status.counters));
		memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
		memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
		pf_status.since = getuptime();

		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1859 
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* Only concrete address families are accepted here. */
		switch (pnl->af) {
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			error = EAFNOSUPPORT;
			goto fail;
		}

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/* Require proto, non-zero addresses, ports for TCP/UDP,
		 * and a valid routing domain. */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)) ||
		    pnl->rdomain > RT_TABLEID_MAX)
			error = EINVAL;
		else {
			key.af = pnl->af;
			key.proto = pnl->proto;
			key.rdomain = pnl->rdomain;
			pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			NET_LOCK();
			PF_STATE_ENTER_READ();
			/* m counts matches; >1 means the key is ambiguous. */
			state = pf_find_state_all(&key, direction, &m);
			/* Ref keeps the state valid after dropping the lock. */
			state = pf_state_ref(state);
			PF_STATE_EXIT_READ();
			NET_UNLOCK();

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* Report the translated side of the state. */
				sk = state->key[sidx];
				pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx],
				    sk->af);
				pnl->rsport = sk->port[sidx];
				pf_addrcpy(&pnl->rdaddr, &sk->addr[didx],
				    sk->af);
				pnl->rdport = sk->port[didx];
				pnl->rrdomain = sk->rdomain;
			} else
				error = ENOENT;
			pf_state_unref(state);
		}
		break;
	}
1925 
	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		/* A zero purge interval would stall expiry; clamp to 1s. */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		/* Staged in the pending default rule; applied on commit.
		 * The value returned is the currently-active one. */
		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		NET_LOCK();
		PF_LOCK();
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
1974 
	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		NET_LOCK();
		PF_LOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* Refuse to shrink below the number of items in use. */
		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
		    pl->limit) {
			error = EBUSY;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}
		/* Fragments reference mbuf clusters. */
		if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			goto fail;
		}

		/* Staged; applied on commit.  Returns the active limit. */
		pf_pool_limits[pl->index].limit_new = pl->limit;
		pl->limit = pf_pool_limits[pl->index].limit;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2008 
	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		NET_LOCK();
		PF_LOCK();
		/* Staged in the transaction set; applied on commit. */
		pf_trans_set.debug = *level;
		pf_trans_set.mask |= PF_TSET_DEBUG;
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2020 
	case DIOCGETRULESETS: {
		/* Count the child anchors directly under pr->path. */
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		NET_LOCK();
		PF_LOCK();
		/* Force NUL-termination of the user-supplied path. */
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pr->nr = 0;
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			/* Main ruleset has no anchor node; count parentless
			 * anchors in the global tree instead. */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCGETRULESET: {
		/* Return the name of the pr->nr'th child anchor. */
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		NET_LOCK();
		PF_LOCK();
		/* Force NUL-termination of the user-supplied path. */
		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			PF_UNLOCK();
			NET_UNLOCK();
			break;
		}
		pr->name[0] = '\0';
		if (ruleset == &pf_main_ruleset) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		PF_UNLOCK();
		NET_UNLOCK();
		/* Empty name means pr->nr was out of range. */
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
2090 
	/*
	 * DIOCR* table ioctls: each case validates the per-element size
	 * (pfrio_esize) against the struct the operation expects, then
	 * dispatches to the matching pfr_*() routine under the net and
	 * pf locks.  PFR_FLAG_USERIOCTL marks the call as coming from
	 * userland.
	 */
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* No element payload for a clear operation. */
		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		/* No element payload for a clear operation. */
		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		NET_LOCK();
		PF_LOCK();
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_UNLOCK();
		NET_UNLOCK();
		break;
	}
2320 
2321 	case DIOCRTSTADDRS: {
2322 		struct pfioc_table *io = (struct pfioc_table *)addr;
2323 
2324 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2325 			error = ENODEV;
2326 			break;
2327 		}
2328 		NET_LOCK();
2329 		PF_LOCK();
2330 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2331 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2332 		    PFR_FLAG_USERIOCTL);
2333 		PF_UNLOCK();
2334 		NET_UNLOCK();
2335 		break;
2336 	}
2337 
2338 	case DIOCRINADEFINE: {
2339 		struct pfioc_table *io = (struct pfioc_table *)addr;
2340 
2341 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2342 			error = ENODEV;
2343 			break;
2344 		}
2345 		NET_LOCK();
2346 		PF_LOCK();
2347 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2348 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2349 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2350 		PF_UNLOCK();
2351 		NET_UNLOCK();
2352 		break;
2353 	}
2354 
2355 	case DIOCOSFPADD: {
2356 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2357 		error = pf_osfp_add(io);
2358 		break;
2359 	}
2360 
2361 	case DIOCOSFPGET: {
2362 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2363 		error = pf_osfp_get(io);
2364 		break;
2365 	}
2366 
2367 	case DIOCXBEGIN: {
2368 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2369 		struct pfioc_trans_e	*ioe;
2370 		struct pfr_table	*table;
2371 		int			 i;
2372 
2373 		if (io->esize != sizeof(*ioe)) {
2374 			error = ENODEV;
2375 			goto fail;
2376 		}
2377 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2378 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2379 		NET_LOCK();
2380 		PF_LOCK();
2381 		pf_default_rule_new = pf_default_rule;
2382 		memset(&pf_trans_set, 0, sizeof(pf_trans_set));
2383 		for (i = 0; i < io->size; i++) {
2384 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2385 				PF_UNLOCK();
2386 				NET_UNLOCK();
2387 				free(table, M_TEMP, sizeof(*table));
2388 				free(ioe, M_TEMP, sizeof(*ioe));
2389 				error = EFAULT;
2390 				goto fail;
2391 			}
2392 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2393 			    sizeof(ioe->anchor)) {
2394 				PF_UNLOCK();
2395 				NET_UNLOCK();
2396 				free(table, M_TEMP, sizeof(*table));
2397 				free(ioe, M_TEMP, sizeof(*ioe));
2398 				error = ENAMETOOLONG;
2399 				goto fail;
2400 			}
2401 			switch (ioe->type) {
2402 			case PF_TRANS_TABLE:
2403 				memset(table, 0, sizeof(*table));
2404 				strlcpy(table->pfrt_anchor, ioe->anchor,
2405 				    sizeof(table->pfrt_anchor));
2406 				if ((error = pfr_ina_begin(table,
2407 				    &ioe->ticket, NULL, 0))) {
2408 					PF_UNLOCK();
2409 					NET_UNLOCK();
2410 					free(table, M_TEMP, sizeof(*table));
2411 					free(ioe, M_TEMP, sizeof(*ioe));
2412 					goto fail;
2413 				}
2414 				break;
2415 			case PF_TRANS_RULESET:
2416 				if ((error = pf_begin_rules(&ioe->ticket,
2417 				    ioe->anchor))) {
2418 					PF_UNLOCK();
2419 					NET_UNLOCK();
2420 					free(table, M_TEMP, sizeof(*table));
2421 					free(ioe, M_TEMP, sizeof(*ioe));
2422 					goto fail;
2423 				}
2424 				break;
2425 			default:
2426 				PF_UNLOCK();
2427 				NET_UNLOCK();
2428 				free(table, M_TEMP, sizeof(*table));
2429 				free(ioe, M_TEMP, sizeof(*ioe));
2430 				error = EINVAL;
2431 				goto fail;
2432 			}
2433 			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2434 				PF_UNLOCK();
2435 				NET_UNLOCK();
2436 				free(table, M_TEMP, sizeof(*table));
2437 				free(ioe, M_TEMP, sizeof(*ioe));
2438 				error = EFAULT;
2439 				goto fail;
2440 			}
2441 		}
2442 		PF_UNLOCK();
2443 		NET_UNLOCK();
2444 		free(table, M_TEMP, sizeof(*table));
2445 		free(ioe, M_TEMP, sizeof(*ioe));
2446 		break;
2447 	}
2448 
2449 	case DIOCXROLLBACK: {
2450 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2451 		struct pfioc_trans_e	*ioe;
2452 		struct pfr_table	*table;
2453 		int			 i;
2454 
2455 		if (io->esize != sizeof(*ioe)) {
2456 			error = ENODEV;
2457 			goto fail;
2458 		}
2459 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2460 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2461 		NET_LOCK();
2462 		PF_LOCK();
2463 		for (i = 0; i < io->size; i++) {
2464 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2465 				PF_UNLOCK();
2466 				NET_UNLOCK();
2467 				free(table, M_TEMP, sizeof(*table));
2468 				free(ioe, M_TEMP, sizeof(*ioe));
2469 				error = EFAULT;
2470 				goto fail;
2471 			}
2472 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2473 			    sizeof(ioe->anchor)) {
2474 				PF_UNLOCK();
2475 				NET_UNLOCK();
2476 				free(table, M_TEMP, sizeof(*table));
2477 				free(ioe, M_TEMP, sizeof(*ioe));
2478 				error = ENAMETOOLONG;
2479 				goto fail;
2480 			}
2481 			switch (ioe->type) {
2482 			case PF_TRANS_TABLE:
2483 				memset(table, 0, sizeof(*table));
2484 				strlcpy(table->pfrt_anchor, ioe->anchor,
2485 				    sizeof(table->pfrt_anchor));
2486 				if ((error = pfr_ina_rollback(table,
2487 				    ioe->ticket, NULL, 0))) {
2488 					PF_UNLOCK();
2489 					NET_UNLOCK();
2490 					free(table, M_TEMP, sizeof(*table));
2491 					free(ioe, M_TEMP, sizeof(*ioe));
2492 					goto fail; /* really bad */
2493 				}
2494 				break;
2495 			case PF_TRANS_RULESET:
2496 				if ((error = pf_rollback_rules(ioe->ticket,
2497 				    ioe->anchor))) {
2498 					PF_UNLOCK();
2499 					NET_UNLOCK();
2500 					free(table, M_TEMP, sizeof(*table));
2501 					free(ioe, M_TEMP, sizeof(*ioe));
2502 					goto fail; /* really bad */
2503 				}
2504 				break;
2505 			default:
2506 				PF_UNLOCK();
2507 				NET_UNLOCK();
2508 				free(table, M_TEMP, sizeof(*table));
2509 				free(ioe, M_TEMP, sizeof(*ioe));
2510 				error = EINVAL;
2511 				goto fail; /* really bad */
2512 			}
2513 		}
2514 		PF_UNLOCK();
2515 		NET_UNLOCK();
2516 		free(table, M_TEMP, sizeof(*table));
2517 		free(ioe, M_TEMP, sizeof(*ioe));
2518 		break;
2519 	}
2520 
2521 	case DIOCXCOMMIT: {
2522 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2523 		struct pfioc_trans_e	*ioe;
2524 		struct pfr_table	*table;
2525 		struct pf_ruleset	*rs;
2526 		int			 i;
2527 
2528 		if (io->esize != sizeof(*ioe)) {
2529 			error = ENODEV;
2530 			goto fail;
2531 		}
2532 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2533 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2534 		NET_LOCK();
2535 		PF_LOCK();
2536 		/* first makes sure everything will succeed */
2537 		for (i = 0; i < io->size; i++) {
2538 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2539 				PF_UNLOCK();
2540 				NET_UNLOCK();
2541 				free(table, M_TEMP, sizeof(*table));
2542 				free(ioe, M_TEMP, sizeof(*ioe));
2543 				error = EFAULT;
2544 				goto fail;
2545 			}
2546 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2547 			    sizeof(ioe->anchor)) {
2548 				PF_UNLOCK();
2549 				NET_UNLOCK();
2550 				free(table, M_TEMP, sizeof(*table));
2551 				free(ioe, M_TEMP, sizeof(*ioe));
2552 				error = ENAMETOOLONG;
2553 				goto fail;
2554 			}
2555 			switch (ioe->type) {
2556 			case PF_TRANS_TABLE:
2557 				rs = pf_find_ruleset(ioe->anchor);
2558 				if (rs == NULL || !rs->topen || ioe->ticket !=
2559 				     rs->tticket) {
2560 					PF_UNLOCK();
2561 					NET_UNLOCK();
2562 					free(table, M_TEMP, sizeof(*table));
2563 					free(ioe, M_TEMP, sizeof(*ioe));
2564 					error = EBUSY;
2565 					goto fail;
2566 				}
2567 				break;
2568 			case PF_TRANS_RULESET:
2569 				rs = pf_find_ruleset(ioe->anchor);
2570 				if (rs == NULL ||
2571 				    !rs->rules.inactive.open ||
2572 				    rs->rules.inactive.ticket !=
2573 				    ioe->ticket) {
2574 					PF_UNLOCK();
2575 					NET_UNLOCK();
2576 					free(table, M_TEMP, sizeof(*table));
2577 					free(ioe, M_TEMP, sizeof(*ioe));
2578 					error = EBUSY;
2579 					goto fail;
2580 				}
2581 				break;
2582 			default:
2583 				PF_UNLOCK();
2584 				NET_UNLOCK();
2585 				free(table, M_TEMP, sizeof(*table));
2586 				free(ioe, M_TEMP, sizeof(*ioe));
2587 				error = EINVAL;
2588 				goto fail;
2589 			}
2590 		}
2591 
2592 		/*
2593 		 * Checked already in DIOCSETLIMIT, but check again as the
2594 		 * situation might have changed.
2595 		 */
2596 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2597 			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2598 			    pf_pool_limits[i].limit_new) {
2599 				PF_UNLOCK();
2600 				NET_UNLOCK();
2601 				free(table, M_TEMP, sizeof(*table));
2602 				free(ioe, M_TEMP, sizeof(*ioe));
2603 				error = EBUSY;
2604 				goto fail;
2605 			}
2606 		}
2607 		/* now do the commit - no errors should happen here */
2608 		for (i = 0; i < io->size; i++) {
2609 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2610 				PF_UNLOCK();
2611 				NET_UNLOCK();
2612 				free(table, M_TEMP, sizeof(*table));
2613 				free(ioe, M_TEMP, sizeof(*ioe));
2614 				error = EFAULT;
2615 				goto fail;
2616 			}
2617 			if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2618 			    sizeof(ioe->anchor)) {
2619 				PF_UNLOCK();
2620 				NET_UNLOCK();
2621 				free(table, M_TEMP, sizeof(*table));
2622 				free(ioe, M_TEMP, sizeof(*ioe));
2623 				error = ENAMETOOLONG;
2624 				goto fail;
2625 			}
2626 			switch (ioe->type) {
2627 			case PF_TRANS_TABLE:
2628 				memset(table, 0, sizeof(*table));
2629 				strlcpy(table->pfrt_anchor, ioe->anchor,
2630 				    sizeof(table->pfrt_anchor));
2631 				if ((error = pfr_ina_commit(table, ioe->ticket,
2632 				    NULL, NULL, 0))) {
2633 					PF_UNLOCK();
2634 					NET_UNLOCK();
2635 					free(table, M_TEMP, sizeof(*table));
2636 					free(ioe, M_TEMP, sizeof(*ioe));
2637 					goto fail; /* really bad */
2638 				}
2639 				break;
2640 			case PF_TRANS_RULESET:
2641 				if ((error = pf_commit_rules(ioe->ticket,
2642 				    ioe->anchor))) {
2643 					PF_UNLOCK();
2644 					NET_UNLOCK();
2645 					free(table, M_TEMP, sizeof(*table));
2646 					free(ioe, M_TEMP, sizeof(*ioe));
2647 					goto fail; /* really bad */
2648 				}
2649 				break;
2650 			default:
2651 				PF_UNLOCK();
2652 				NET_UNLOCK();
2653 				free(table, M_TEMP, sizeof(*table));
2654 				free(ioe, M_TEMP, sizeof(*ioe));
2655 				error = EINVAL;
2656 				goto fail; /* really bad */
2657 			}
2658 		}
2659 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2660 			if (pf_pool_limits[i].limit_new !=
2661 			    pf_pool_limits[i].limit &&
2662 			    pool_sethardlimit(pf_pool_limits[i].pp,
2663 			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2664 				PF_UNLOCK();
2665 				NET_UNLOCK();
2666 				free(table, M_TEMP, sizeof(*table));
2667 				free(ioe, M_TEMP, sizeof(*ioe));
2668 				error = EBUSY;
2669 				goto fail; /* really bad */
2670 			}
2671 			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2672 		}
2673 		for (i = 0; i < PFTM_MAX; i++) {
2674 			int old = pf_default_rule.timeout[i];
2675 
2676 			pf_default_rule.timeout[i] =
2677 			    pf_default_rule_new.timeout[i];
2678 			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2679 			    pf_default_rule.timeout[i] < old)
2680 				task_add(net_tq(0), &pf_purge_task);
2681 		}
2682 		pfi_xcommit();
2683 		pf_trans_set_commit();
2684 		PF_UNLOCK();
2685 		NET_UNLOCK();
2686 		free(table, M_TEMP, sizeof(*table));
2687 		free(ioe, M_TEMP, sizeof(*ioe));
2688 		break;
2689 	}
2690 
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node	*n, *p, *pstore;
		u_int32_t		 nr = 0;
		size_t			 space = psn->psn_len;

		/* Bounce buffer: sanitized per-node copy before copyout. */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		NET_LOCK();
		PF_LOCK();
		/*
		 * Size query: with psn_len == 0 userland only wants to know
		 * how much buffer space a full dump would need.
		 */
		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			PF_UNLOCK();
			NET_UNLOCK();
			free(pstore, M_TEMP, sizeof(*pstore));
			break;
		}

		/* Dump pass: copy out as many nodes as fit in psn_len. */
		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int	secs = getuptime(), diff;

			if ((nr + 1) * sizeof(*p) > psn->psn_len)
				break;

			/* Strip kernel pointers; export rule by number. */
			memcpy(pstore, n, sizeof(*pstore));
			memset(&pstore->entry, 0, sizeof(pstore->entry));
			pstore->rule.ptr = NULL;
			pstore->kif = NULL;
			pstore->rule.nr = n->rule.ptr->nr;
			/* Convert absolute uptimes to relative seconds. */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				PF_UNLOCK();
				NET_UNLOCK();
				free(pstore, M_TEMP, sizeof(*pstore));
				goto fail;
			}
			p++;
			nr++;
		}
		/* Report how many bytes were actually written. */
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		PF_UNLOCK();
		NET_UNLOCK();
		free(pstore, M_TEMP, sizeof(*pstore));
		break;
	}
2755 
2756 	case DIOCCLRSRCNODES: {
2757 		struct pf_src_node	*n;
2758 		struct pf_state		*state;
2759 
2760 		NET_LOCK();
2761 		PF_LOCK();
2762 		PF_STATE_ENTER_WRITE();
2763 		RB_FOREACH(state, pf_state_tree_id, &tree_id)
2764 			pf_src_tree_remove_state(state);
2765 		PF_STATE_EXIT_WRITE();
2766 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2767 			n->expire = 1;
2768 		pf_purge_expired_src_nodes();
2769 		PF_UNLOCK();
2770 		NET_UNLOCK();
2771 		break;
2772 	}
2773 
2774 	case DIOCKILLSRCNODES: {
2775 		struct pf_src_node	*sn;
2776 		struct pf_state		*s;
2777 		struct pfioc_src_node_kill *psnk =
2778 		    (struct pfioc_src_node_kill *)addr;
2779 		u_int			killed = 0;
2780 
2781 		NET_LOCK();
2782 		PF_LOCK();
2783 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2784 			if (pf_match_addr(psnk->psnk_src.neg,
2785 				&psnk->psnk_src.addr.v.a.addr,
2786 				&psnk->psnk_src.addr.v.a.mask,
2787 				&sn->addr, sn->af) &&
2788 			    pf_match_addr(psnk->psnk_dst.neg,
2789 				&psnk->psnk_dst.addr.v.a.addr,
2790 				&psnk->psnk_dst.addr.v.a.mask,
2791 				&sn->raddr, sn->af)) {
2792 				/* Handle state to src_node linkage */
2793 				if (sn->states != 0) {
2794 					PF_ASSERT_LOCKED();
2795 					PF_STATE_ENTER_WRITE();
2796 					RB_FOREACH(s, pf_state_tree_id,
2797 					   &tree_id)
2798 						pf_state_rm_src_node(s, sn);
2799 					PF_STATE_EXIT_WRITE();
2800 				}
2801 				sn->expire = 1;
2802 				killed++;
2803 			}
2804 		}
2805 
2806 		if (killed > 0)
2807 			pf_purge_expired_src_nodes();
2808 
2809 		psnk->psnk_killed = killed;
2810 		PF_UNLOCK();
2811 		NET_UNLOCK();
2812 		break;
2813 	}
2814 
2815 	case DIOCSETHOSTID: {
2816 		u_int32_t	*hostid = (u_int32_t *)addr;
2817 
2818 		NET_LOCK();
2819 		PF_LOCK();
2820 		if (*hostid == 0)
2821 			pf_trans_set.hostid = arc4random();
2822 		else
2823 			pf_trans_set.hostid = *hostid;
2824 		pf_trans_set.mask |= PF_TSET_HOSTID;
2825 		PF_UNLOCK();
2826 		NET_UNLOCK();
2827 		break;
2828 	}
2829 
2830 	case DIOCOSFPFLUSH:
2831 		pf_osfp_flush();
2832 		break;
2833 
2834 	case DIOCIGETIFACES: {
2835 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2836 
2837 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2838 			error = ENODEV;
2839 			break;
2840 		}
2841 		NET_LOCK();
2842 		PF_LOCK();
2843 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2844 		    &io->pfiio_size);
2845 		PF_UNLOCK();
2846 		NET_UNLOCK();
2847 		break;
2848 	}
2849 
2850 	case DIOCSETIFFLAG: {
2851 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2852 
2853 		NET_LOCK();
2854 		PF_LOCK();
2855 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2856 		PF_UNLOCK();
2857 		NET_UNLOCK();
2858 		break;
2859 	}
2860 
2861 	case DIOCCLRIFFLAG: {
2862 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2863 
2864 		NET_LOCK();
2865 		PF_LOCK();
2866 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2867 		PF_UNLOCK();
2868 		NET_UNLOCK();
2869 		break;
2870 	}
2871 
2872 	case DIOCSETREASS: {
2873 		u_int32_t	*reass = (u_int32_t *)addr;
2874 
2875 		NET_LOCK();
2876 		PF_LOCK();
2877 		pf_trans_set.reass = *reass;
2878 		pf_trans_set.mask |= PF_TSET_REASS;
2879 		PF_UNLOCK();
2880 		NET_UNLOCK();
2881 		break;
2882 	}
2883 
2884 	case DIOCSETSYNFLWATS: {
2885 		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2886 
2887 		NET_LOCK();
2888 		PF_LOCK();
2889 		error = pf_syncookies_setwats(io->hiwat, io->lowat);
2890 		PF_UNLOCK();
2891 		NET_UNLOCK();
2892 		break;
2893 	}
2894 
2895 	case DIOCGETSYNFLWATS: {
2896 		struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2897 
2898 		NET_LOCK();
2899 		PF_LOCK();
2900 		error = pf_syncookies_getwats(io);
2901 		PF_UNLOCK();
2902 		NET_UNLOCK();
2903 		break;
2904 	}
2905 
2906 	case DIOCSETSYNCOOKIES: {
2907 		u_int8_t	*mode = (u_int8_t *)addr;
2908 
2909 		NET_LOCK();
2910 		PF_LOCK();
2911 		error = pf_syncookies_setmode(*mode);
2912 		PF_UNLOCK();
2913 		NET_UNLOCK();
2914 		break;
2915 	}
2916 
2917 	default:
2918 		error = ENODEV;
2919 		break;
2920 	}
2921 fail:
2922 	return (error);
2923 }
2924 
2925 void
2926 pf_trans_set_commit(void)
2927 {
2928 	if (pf_trans_set.mask & PF_TSET_STATUSIF)
2929 		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2930 	if (pf_trans_set.mask & PF_TSET_DEBUG)
2931 		pf_status.debug = pf_trans_set.debug;
2932 	if (pf_trans_set.mask & PF_TSET_HOSTID)
2933 		pf_status.hostid = pf_trans_set.hostid;
2934 	if (pf_trans_set.mask & PF_TSET_REASS)
2935 		pf_status.reass = pf_trans_set.reass;
2936 }
2937 
2938 void
2939 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2940 {
2941 	memmove(to, from, sizeof(*to));
2942 	to->kif = NULL;
2943 	to->addr.p.tbl = NULL;
2944 }
2945 
/*
 * Copy a rule supplied by userland (from) into a kernel rule (to):
 * scrub kernel-only pointers, then resolve names (interfaces, queues,
 * tags, overload table) into kernel references.  Returns 0 on success,
 * EINVAL on bad names/values or EBUSY when a referenced object is
 * unavailable.  NOTE(review): on error the rule may already hold some
 * references (kifs, overload table) -- the caller is expected to
 * dispose of the whole rule, which releases them.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	/* Copy address specs; never trust table pointers from userland. */
	to->src = from->src;
	to->src.addr.p.tbl = NULL;
	to->dst = from->dst;
	to->dst.addr.p.tbl = NULL;

	/* XXX union skip[] */

	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* Pools are copied with their kernel pointers cleared. */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	/* Resolve interface names into kif references. */
	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);
	/* Attach the overload table, if one was named, and activate it. */
	if (to->overload_tblname[0]) {
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	/* Routing table / routing domain must exist and be in range. */
	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain != -1 && (to->onrdomain < 0 ||
	    to->onrdomain > RT_TABLEID_MAX))
		return (EINVAL);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
	pf_init_threshold(&to->pktrate, from->pktrate.limit,
	    from->pktrate.seconds);

	/* Map queue names to qids; pqid defaults to qid if unset. */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	/* Allocate (or reference) the tag ids for tag/match-tag names. */
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->delay = from->delay;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* Without logging enabled a log interface makes no sense. */
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert.type = from->divert.type;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}
3078 
3079 int
3080 pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
3081 {
3082 	struct pf_status	pfs;
3083 
3084 	NET_RLOCK_IN_IOCTL();
3085 	PF_LOCK();
3086 	memcpy(&pfs, &pf_status, sizeof(struct pf_status));
3087 	pfi_update_status(pfs.ifname, &pfs);
3088 	PF_UNLOCK();
3089 	NET_RUNLOCK_IN_IOCTL();
3090 
3091 	return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
3092 }
3093