xref: /openbsd-src/sys/net/pf_ioctl.c (revision d59bb9942320b767f2a19aaa7690c8c6e30b724c)
1 /*	$OpenBSD: pf_ioctl.c,v 1.307 2017/01/30 17:41:34 benno Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002 - 2013 Henning Brauer <henning@openbsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/kernel.h>
49 #include <sys/time.h>
50 #include <sys/timeout.h>
51 #include <sys/pool.h>
52 #include <sys/malloc.h>
53 #include <sys/kthread.h>
54 #include <sys/rwlock.h>
55 #include <sys/syslog.h>
56 #include <uvm/uvm_extern.h>
57 
58 #include <crypto/md5.h>
59 
60 #include <net/if.h>
61 #include <net/if_var.h>
62 #include <net/route.h>
63 #include <net/hfsc.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/ip.h>
67 #include <netinet/in_pcb.h>
68 #include <netinet/ip_var.h>
69 #include <netinet/ip_icmp.h>
70 #include <netinet/tcp.h>
71 #include <netinet/udp.h>
72 
73 #ifdef INET6
74 #include <netinet/ip6.h>
75 #include <netinet/icmp6.h>
76 #endif /* INET6 */
77 
78 #include <net/pfvar.h>
79 #include <net/pfvar_priv.h>
80 
81 #if NPFSYNC > 0
82 #include <netinet/ip_ipsp.h>
83 #include <net/if_pfsync.h>
84 #endif /* NPFSYNC > 0 */
85 
/* forward declarations for the pf ioctl machinery */
void			 pfattach(int);
void			 pf_thread_create(void *);
int			 pfopen(dev_t, int, int, struct proc *);
int			 pfclose(dev_t, int, int, struct proc *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
int			 pf_begin_rules(u_int32_t *, const char *);
int			 pf_rollback_rules(u_int32_t, char *);
int			 pf_enable_queues(void);
void			 pf_remove_queues(void);
int			 pf_commit_queues(void);
void			 pf_free_queues(struct pf_queuehead *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, char *);
int			 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
int			 pf_kif_setup(char *, struct pfi_kif **);
void			 pf_addr_copyout(struct pf_addr_wrap *);
void			 pf_trans_set_commit(void);
void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
			    struct pf_ruleset *);
u_int16_t		 pf_qname2qid(char *, int);
void			 pf_qid2qname(u_int16_t, char *);
void			 pf_qid_unref(u_int16_t);

/* the implicit catch-all rule, and the staged copy built during a commit */
struct pf_rule		 pf_default_rule, pf_default_rule_new;

/*
 * Global options staged by the DIOCSET* ioctls; applied to pf_status
 * in one go by pf_trans_set_commit().
 */
struct {
	char		statusif[IFNAMSIZ];	/* status logging interface */
	u_int32_t	debug;			/* debug log level */
	u_int32_t	hostid;			/* pfsync host id */
	u_int32_t	reass;			/* reassembly mode */
	u_int32_t	mask;			/* PF_TSET_* bits set below */
} pf_trans_set;

#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

#define	TAGID_MAX	 50000
/* tag and queue-id namespaces; both lists are kept sorted by id */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

/* queue ids reuse the tag allocator, so the name buffers must match */
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
141 
142 
/*
 * pfattach: one-time initialization at device attach.  Sets up the
 * memory pools, the sub-systems (hfsc, tables, interfaces, OS
 * fingerprints), the implicit default "pass" rule with its timeouts,
 * and defers creation of the purge thread until the scheduler runs.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
	    IPL_SOFTNET, 0, "pfrule", NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
	    IPL_SOFTNET, 0, "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
	    IPL_SOFTNET, 0, "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
	    IPL_SOFTNET, 0, "pfstate", NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
	    IPL_SOFTNET, 0, "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
	    IPL_SOFTNET, 0, "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
	    IPL_SOFTNET, 0, "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
	    IPL_SOFTNET, 0, "pfqueue", NULL);
	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* shrink the table-entry limit on machines with <= 100MB of RAM */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type =  PF_ADDR_NONE;
	pf_default_rule.nat.addr.type =  PF_ADDR_NONE;
	pf_default_rule.route.addr.type =  PF_ADDR_NONE;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
	kthread_create_deferred(pf_thread_create, NULL);
}
230 
231 void
232 pf_thread_create(void *v)
233 {
234 	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
235 		panic("pfpurge thread");
236 }
237 
238 int
239 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
240 {
241 	if (minor(dev) >= 1)
242 		return (ENXIO);
243 	return (0);
244 }
245 
246 int
247 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
248 {
249 	if (minor(dev) >= 1)
250 		return (ENXIO);
251 	return (0);
252 }
253 
/*
 * pf_rm_rule: unlink `rule' from `rulequeue' (if non-NULL) and release
 * everything it references.  The final free is deferred while states or
 * source nodes still point at the rule; the purge thread later calls
 * back in with rulequeue == NULL to finish the job.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* a NULL tqe_prev marks the rule as already unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* still referenced, or still linked somewhere: defer the free */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	/* tables were not detached above when called without a queue */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pool_put(&pf_rule_pl, rule);
}
306 
/*
 * pf_purge_rule: remove an expired rule from its active ruleset,
 * renumber the remaining rules, bump the ticket so userland notices,
 * and drop the ruleset if it became empty.
 */
void
pf_purge_rule(struct pf_rule *rule)
{
	u_int32_t		 nr = 0;
	struct pf_ruleset	*ruleset;

	KASSERT((rule != NULL) && (rule->ruleset != NULL));
	ruleset = rule->ruleset;

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	/* close the numbering gap left by the removed rule */
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);
	pf_remove_if_empty_ruleset(ruleset);
}
324 
/*
 * tagname2tag: map a tag name to its numeric id on `head', allocating
 * a new id if `create' is set.  Ids start at 1 and the list is kept
 * sorted, so freed ids are reused before new ones are handed out.
 * Returns 0 on failure (unknown name, id space full, or no memory);
 * a successful return holds a reference for the caller.
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_RTABLE, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;	/* M_ZERO above, so ref is now 1: caller's reference */

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
370 
371 void
372 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
373 {
374 	struct pf_tagname	*tag;
375 
376 	TAILQ_FOREACH(tag, head, entries)
377 		if (tag->tag == tagid) {
378 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
379 			return;
380 		}
381 }
382 
383 void
384 tag_unref(struct pf_tags *head, u_int16_t tag)
385 {
386 	struct pf_tagname	*p, *next;
387 
388 	if (tag == 0)
389 		return;
390 
391 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
392 		next = TAILQ_NEXT(p, entries);
393 		if (tag == p->tag) {
394 			if (--p->ref == 0) {
395 				TAILQ_REMOVE(head, p, entries);
396 				free(p, M_RTABLE, sizeof(*p));
397 			}
398 			break;
399 		}
400 	}
401 }
402 
/* Packet-tag namespace wrapper around tagname2tag(). */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}
408 
/* Packet-tag namespace wrapper around tag2tagname(). */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
414 
415 void
416 pf_tag_ref(u_int16_t tag)
417 {
418 	struct pf_tagname *t;
419 
420 	TAILQ_FOREACH(t, &pf_tags, entries)
421 		if (t->tag == tag)
422 			break;
423 	if (t != NULL)
424 		t->ref++;
425 }
426 
/* Packet-tag namespace wrapper around tag_unref(). */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
432 
433 int
434 pf_rtlabel_add(struct pf_addr_wrap *a)
435 {
436 	if (a->type == PF_ADDR_RTLABEL &&
437 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
438 		return (-1);
439 	return (0);
440 }
441 
/* Release the route-label reference taken by pf_rtlabel_add(). */
void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
}
448 
449 void
450 pf_rtlabel_copyout(struct pf_addr_wrap *a)
451 {
452 	const char	*name;
453 
454 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
455 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
456 			strlcpy(a->v.rtlabelname, "?",
457 			    sizeof(a->v.rtlabelname));
458 		else
459 			strlcpy(a->v.rtlabelname, name,
460 			    sizeof(a->v.rtlabelname));
461 	}
462 }
463 
/* Queue-id namespace wrapper around tagname2tag(). */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}
469 
/* Queue-id namespace wrapper around tag2tagname(). */
void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}
475 
476 void
477 pf_qid_unref(u_int16_t qid)
478 {
479 	tag_unref(&pf_qids, (u_int16_t)qid);
480 }
481 
482 int
483 pf_begin_rules(u_int32_t *ticket, const char *anchor)
484 {
485 	struct pf_ruleset	*rs;
486 	struct pf_rule		*rule;
487 
488 	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
489 		return (EINVAL);
490 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
491 		pf_rm_rule(rs->rules.inactive.ptr, rule);
492 		rs->rules.inactive.rcount--;
493 	}
494 	*ticket = ++rs->rules.inactive.ticket;
495 	rs->rules.inactive.open = 1;
496 	return (0);
497 }
498 
499 int
500 pf_rollback_rules(u_int32_t ticket, char *anchor)
501 {
502 	struct pf_ruleset	*rs;
503 	struct pf_rule		*rule;
504 
505 	rs = pf_find_ruleset(anchor);
506 	if (rs == NULL || !rs->rules.inactive.open ||
507 	    rs->rules.inactive.ticket != ticket)
508 		return (0);
509 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
510 		pf_rm_rule(rs->rules.inactive.ptr, rule);
511 		rs->rules.inactive.rcount--;
512 	}
513 	rs->rules.inactive.open = 0;
514 
515 	/* queue defs only in the main ruleset */
516 	if (anchor[0])
517 		return (0);
518 
519 	pf_free_queues(pf_queues_inactive);
520 
521 	return (0);
522 }
523 
524 void
525 pf_free_queues(struct pf_queuehead *where)
526 {
527 	struct pf_queuespec	*q, *qtmp;
528 
529 	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
530 		TAILQ_REMOVE(where, q, entries);
531 		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
532 		pool_put(&pf_queue_pl, q);
533 	}
534 }
535 
536 void
537 pf_remove_queues(void)
538 {
539 	struct pf_queuespec	*q;
540 	struct ifnet		*ifp;
541 
542 	/* put back interfaces in normal queueing mode */
543 	TAILQ_FOREACH(q, pf_queues_active, entries) {
544 		if (q->parent_qid != 0)
545 			continue;
546 
547 		ifp = q->kif->pfik_ifp;
548 		if (ifp == NULL)
549 			continue;
550 
551 		KASSERT(HFSC_ENABLED(&ifp->if_snd));
552 
553 		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
554 	}
555 }
556 
/*
 * Temporary bookkeeping node used while committing queues: one per
 * interface that gets a freshly allocated hfsc instance.
 */
struct pf_hfsc_queue {
	struct ifnet		*ifp;
	struct hfsc_if		*hif;
	struct pf_hfsc_queue	*next;
};

/*
 * Find the bookkeeping node for `ifp' in the singly linked `list';
 * returns NULL when the interface has no entry.
 */
static inline struct pf_hfsc_queue *
pf_hfsc_ifp2q(struct pf_hfsc_queue *list, struct ifnet *ifp)
{
	struct pf_hfsc_queue *cur;

	for (cur = list; cur != NULL; cur = cur->next) {
		if (cur->ifp == ifp)
			break;
	}

	return (cur);
}
577 
/*
 * pf_create_queues: attach hfsc discipline for every interface that has
 * a root queue in the (now) active list, add all child queues, detach
 * hfsc from interfaces that only appear in the old list, and commit.
 * On error the freshly allocated hfsc instances are freed and the
 * interfaces are left untouched.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_hfsc_queue	*list = NULL, *phq;
	int			 error;

	/* find root queues and alloc hfsc for these interfaces */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		phq = malloc(sizeof(*phq), M_TEMP, M_WAITOK);
		phq->ifp = ifp;
		phq->hif = hfsc_pf_alloc(ifp);

		phq->next = list;
		list = phq;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* the root pass above created a node for this interface */
		phq = pf_hfsc_ifp2q(list, ifp);
		KASSERT(phq != NULL);

		error = hfsc_pf_addqueue(phq->hif, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* interface still queued in the new config: keep it */
		phq = pf_hfsc_ifp2q(list, ifp);
		if (phq != NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		phq = list;
		list = phq->next;

		ifp = phq->ifp;

		ifq_attach(&ifp->if_snd, ifq_hfsc_ops, phq->hif);
		free(phq, M_TEMP, sizeof(*phq));
	}

	return (0);

error:
	/* tear down the half-built hfsc instances; nothing was attached */
	while (list != NULL) {
		phq = list;
		list = phq->next;

		hfsc_pf_free(phq->hif);
		free(phq, M_TEMP, sizeof(*phq));
	}

	return (error);
}
657 
658 int
659 pf_commit_queues(void)
660 {
661 	struct pf_queuehead	*qswap;
662 	int error;
663 
664         /* swap */
665         qswap = pf_queues_active;
666         pf_queues_active = pf_queues_inactive;
667         pf_queues_inactive = qswap;
668 
669 	error = pf_create_queues();
670 	if (error != 0) {
671 		pf_queues_inactive = pf_queues_active;
672 		pf_queues_active = qswap;
673 		return (error);
674 	}
675 
676         pf_free_queues(pf_queues_inactive);
677 
678 	return (0);
679 }
680 
/*
 * Helpers for folding rule fields into the MD5 digest used for pfsync
 * ruleset matching.  Multi-byte integers go through htonl()/htons()
 * first so the digest is independent of host byte order.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
696 
/*
 * pf_hash_rule_addr: fold one rule address (and its ports/ops) into the
 * pfsync matching digest.  Field order is part of the wire-visible
 * checksum, so it must not be changed.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
		case PF_ADDR_RTLABEL:
			PF_MD5_UPD(pfr, addr.v.rtlabelname);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
724 
/*
 * pf_hash_rule: fold the match-relevant fields of a rule into the
 * pfsync matching digest.  x/y are scratch storage for the byte-order
 * conversion macros.  Field order is part of the wire-visible checksum
 * and must stay fixed.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
764 
/*
 * pf_commit_rules: swap the staged (inactive) ruleset in for the active
 * one and destroy the superseded rules.  The ticket must match the one
 * handed out by pf_begin_rules(), otherwise EBUSY.  For the main
 * ruleset the pfsync matching checksum is recomputed first, and queue
 * changes are committed at the end.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	/* Make sure any expired rules get removed from active rules first. */
	pf_purge_expired_rules(1);

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;
	old_array = rs->rules.active.ptr_array;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.ptr_array = old_array;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	/* old_array now lives on the inactive side; free it there */
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}
820 
/*
 * pf_setup_pfsync_matching: hash every staged main-ruleset rule into an
 * MD5 digest stored in pf_status.pf_chksum, so pfsync peers can verify
 * they run identical rulesets.  Also rebuilds ptr_array for lookup of
 * rules by number.  Returns ENOMEM if the array cannot be allocated.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;

	if (rs->rules.inactive.rcount) {
		rs->rules.inactive.ptr_array =
		    mallocarray(rs->rules.inactive.rcount, sizeof(caddr_t),
		    M_TEMP, M_NOWAIT);

		if (!rs->rules.inactive.ptr_array)
			return (ENOMEM);

		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
			pf_hash_rule(&ctx, rule);
			/* assumes nr values are dense 0..rcount-1, as
			 * assigned when rules are staged (DIOCADDRULE) */
			(rs->rules.inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}
851 
852 int
853 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
854     sa_family_t af)
855 {
856 	if (pfi_dynaddr_setup(addr, af) ||
857 	    pf_tbladdr_setup(ruleset, addr) ||
858 	    pf_rtlabel_add(addr))
859 		return (EINVAL);
860 
861 	return (0);
862 }
863 
864 int
865 pf_kif_setup(char *ifname, struct pfi_kif **kif)
866 {
867 	if (ifname[0]) {
868 		*kif = pfi_kif_get(ifname);
869 		if (*kif == NULL)
870 			return (EINVAL);
871 
872 		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
873 	} else
874 		*kif = NULL;
875 
876 	return (0);
877 }
878 
/*
 * pf_addr_copyout: translate the kernel-internal address forms
 * (dynaddr, table, route label) into their user-visible fields before
 * a rule is copied out to userland.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
886 
887 int
888 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
889 {
890 	int			 s;
891 	int			 error = 0;
892 
893 	/* XXX keep in sync with switch() below */
894 	if (securelevel > 1)
895 		switch (cmd) {
896 		case DIOCGETRULES:
897 		case DIOCGETRULE:
898 		case DIOCGETSTATE:
899 		case DIOCSETSTATUSIF:
900 		case DIOCGETSTATUS:
901 		case DIOCCLRSTATUS:
902 		case DIOCNATLOOK:
903 		case DIOCSETDEBUG:
904 		case DIOCGETSTATES:
905 		case DIOCGETTIMEOUT:
906 		case DIOCCLRRULECTRS:
907 		case DIOCGETLIMIT:
908 		case DIOCGETRULESETS:
909 		case DIOCGETRULESET:
910 		case DIOCGETQUEUES:
911 		case DIOCGETQUEUE:
912 		case DIOCGETQSTATS:
913 		case DIOCRGETTABLES:
914 		case DIOCRGETTSTATS:
915 		case DIOCRCLRTSTATS:
916 		case DIOCRCLRADDRS:
917 		case DIOCRADDADDRS:
918 		case DIOCRDELADDRS:
919 		case DIOCRSETADDRS:
920 		case DIOCRGETASTATS:
921 		case DIOCRCLRASTATS:
922 		case DIOCRTSTADDRS:
923 		case DIOCOSFPGET:
924 		case DIOCGETSRCNODES:
925 		case DIOCCLRSRCNODES:
926 		case DIOCIGETIFACES:
927 		case DIOCSETIFFLAG:
928 		case DIOCCLRIFFLAG:
929 			break;
930 		case DIOCRCLRTABLES:
931 		case DIOCRADDTABLES:
932 		case DIOCRDELTABLES:
933 		case DIOCRSETTFLAGS:
934 			if (((struct pfioc_table *)addr)->pfrio_flags &
935 			    PFR_FLAG_DUMMY)
936 				break; /* dummy operation ok */
937 			return (EPERM);
938 		default:
939 			return (EPERM);
940 		}
941 
942 	if (!(flags & FWRITE))
943 		switch (cmd) {
944 		case DIOCGETRULES:
945 		case DIOCGETSTATE:
946 		case DIOCGETSTATUS:
947 		case DIOCGETSTATES:
948 		case DIOCGETTIMEOUT:
949 		case DIOCGETLIMIT:
950 		case DIOCGETRULESETS:
951 		case DIOCGETRULESET:
952 		case DIOCGETQUEUES:
953 		case DIOCGETQUEUE:
954 		case DIOCGETQSTATS:
955 		case DIOCNATLOOK:
956 		case DIOCRGETTABLES:
957 		case DIOCRGETTSTATS:
958 		case DIOCRGETADDRS:
959 		case DIOCRGETASTATS:
960 		case DIOCRTSTADDRS:
961 		case DIOCOSFPGET:
962 		case DIOCGETSRCNODES:
963 		case DIOCIGETIFACES:
964 			break;
965 		case DIOCRCLRTABLES:
966 		case DIOCRADDTABLES:
967 		case DIOCRDELTABLES:
968 		case DIOCRCLRTSTATS:
969 		case DIOCRCLRADDRS:
970 		case DIOCRADDADDRS:
971 		case DIOCRDELADDRS:
972 		case DIOCRSETADDRS:
973 		case DIOCRSETTFLAGS:
974 			if (((struct pfioc_table *)addr)->pfrio_flags &
975 			    PFR_FLAG_DUMMY) {
976 				flags |= FWRITE; /* need write lock for dummy */
977 				break; /* dummy operation ok */
978 			}
979 			return (EACCES);
980 		case DIOCGETRULE:
981 			if (((struct pfioc_rule *)addr)->action ==
982 			    PF_GET_CLR_CNTR)
983 				return (EACCES);
984 			break;
985 		default:
986 			return (EACCES);
987 		}
988 
989 	NET_LOCK(s);
990 	switch (cmd) {
991 
992 	case DIOCSTART:
993 		if (pf_status.running)
994 			error = EEXIST;
995 		else {
996 			pf_status.running = 1;
997 			pf_status.since = time_second;
998 			if (pf_status.stateid == 0) {
999 				pf_status.stateid = time_second;
1000 				pf_status.stateid = pf_status.stateid << 32;
1001 			}
1002 			pf_create_queues();
1003 			DPFPRINTF(LOG_NOTICE, "pf: started");
1004 		}
1005 		break;
1006 
1007 	case DIOCSTOP:
1008 		if (!pf_status.running)
1009 			error = ENOENT;
1010 		else {
1011 			pf_status.running = 0;
1012 			pf_status.since = time_second;
1013 			pf_remove_queues();
1014 			DPFPRINTF(LOG_NOTICE, "pf: stopped");
1015 		}
1016 		break;
1017 
1018 	case DIOCGETQUEUES: {
1019 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1020 		struct pf_queuespec	*qs;
1021 		u_int32_t		 nr = 0;
1022 
1023 		pq->ticket = pf_main_ruleset.rules.active.ticket;
1024 
1025 		/* save state to not run over them all each time? */
1026 		qs = TAILQ_FIRST(pf_queues_active);
1027 		while (qs != NULL) {
1028 			qs = TAILQ_NEXT(qs, entries);
1029 			nr++;
1030 		}
1031 		pq->nr = nr;
1032 		break;
1033 	}
1034 
1035 	case DIOCGETQUEUE: {
1036 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1037 		struct pf_queuespec	*qs;
1038 		u_int32_t		 nr = 0;
1039 
1040 		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1041 			error = EBUSY;
1042 			break;
1043 		}
1044 
1045 		/* save state to not run over them all each time? */
1046 		qs = TAILQ_FIRST(pf_queues_active);
1047 		while ((qs != NULL) && (nr++ < pq->nr))
1048 			qs = TAILQ_NEXT(qs, entries);
1049 		if (qs == NULL) {
1050 			error = EBUSY;
1051 			break;
1052 		}
1053 		bcopy(qs, &pq->queue, sizeof(pq->queue));
1054 		break;
1055 	}
1056 
1057 	case DIOCGETQSTATS: {
1058 		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
1059 		struct pf_queuespec	*qs;
1060 		u_int32_t		 nr;
1061 		int			 nbytes;
1062 
1063 		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1064 			error = EBUSY;
1065 			break;
1066 		}
1067 		nbytes = pq->nbytes;
1068 		nr = 0;
1069 
1070 		/* save state to not run over them all each time? */
1071 		qs = TAILQ_FIRST(pf_queues_active);
1072 		while ((qs != NULL) && (nr++ < pq->nr))
1073 			qs = TAILQ_NEXT(qs, entries);
1074 		if (qs == NULL) {
1075 			error = EBUSY;
1076 			break;
1077 		}
1078 		bcopy(qs, &pq->queue, sizeof(pq->queue));
1079 		error = hfsc_pf_qstats(qs, pq->buf, &nbytes);
1080 		if (error == 0)
1081 			pq->nbytes = nbytes;
1082 		break;
1083 	}
1084 
	/*
	 * DIOCADDQUEUE: append one queue spec to the inactive queue list;
	 * it becomes active only on a later DIOCXCOMMIT.  Requires the
	 * inactive ruleset ticket obtained at transaction begin.
	 */
	case DIOCADDQUEUE: {
		struct pfioc_queue	*q = (struct pfioc_queue *)addr;
		struct pf_queuespec	*qs;

		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
			error = EBUSY;
			break;
		}
		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (qs == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&q->queue, qs, sizeof(*qs));
		/* allocate (or look up) a qid for this queue name */
		qs->qid = pf_qname2qid(qs->qname, 1);
		/* a named parent must already have a qid; 0 means unknown */
		if (qs->parent[0] && (qs->parent_qid =
		    pf_qname2qid(qs->parent, 0)) == 0) {
			pool_put(&pf_queue_pl, qs);
			error = ESRCH;
			break;
		}
		qs->kif = pfi_kif_get(qs->ifname);
		if (qs->kif == NULL) {
			pool_put(&pf_queue_pl, qs);
			error = ESRCH;
			break;
		}
		/* XXX resolve bw percentage specs */
		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);
		if (qs->qlimit == 0)
			qs->qlimit = HFSC_DEFAULT_QLIMIT;
		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);

		break;
	}
1120 
	/*
	 * DIOCADDRULE: append one rule to the inactive ruleset of the
	 * given anchor; it takes effect on DIOCXCOMMIT.  Validation
	 * errors after copyin are accumulated in 'error' and checked
	 * once, so all setup helpers run before the rule is rejected.
	 */
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;

		/* anchor path comes from userland; force NUL termination */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		/* return_icmp holds type in the high byte, code in the low */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules.inactive.ticket) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
			pf_rm_rule(NULL, rule);
			rule = NULL;
			break;
		}
		/* record who loaded the rule (real uid / pid) */
		rule->cuid = p->p_ucred->cr_ruid;
		rule->cpid = p->p_p->ps_pid;

		/* af == 0 means "any address family" */
		switch (rule->af) {
		case 0:
			break;
		case AF_INET:
			break;
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			pf_rm_rule(NULL, rule);
			rule = NULL;
			error = EAFNOSUPPORT;
			goto fail;
		}
		/* number the rule after the current tail of the list */
		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

		if (rule->src.addr.type == PF_ADDR_NONE ||
		    rule->dst.addr.type == PF_ADDR_NONE)
			error = EINVAL;

		/* resolve tables/interfaces referenced by each address */
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		/* route-to only makes sense with an explicit direction */
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > IFQ_MAXPRIO ||
		    rule->set_prio[1] > IFQ_MAXPRIO))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
		    rule, entries);
		rule->ruleset = ruleset;
		ruleset->rules.inactive.rcount++;
		break;
	}
1208 
	/*
	 * DIOCGETRULES: report the number of rules in the anchor's
	 * active ruleset and hand out the ticket needed by subsequent
	 * DIOCGETRULE calls.
	 */
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		/* rules are numbered 0..n-1, so count = last nr + 1 */
		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules.active.ticket;
		break;
	}
1228 
	/*
	 * DIOCGETRULE: copy out rule number pr->nr of the anchor's
	 * active ruleset.  Kernel-only pointers in the copy are cleared
	 * before it is handed back to userland.  With action
	 * PF_GET_CLR_CNTR the rule's counters are reset afterwards.
	 */
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		/* ticket from DIOCGETRULES guards against a changed set */
		if (pr->ticket != ruleset->rules.active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		/* scrub kernel pointers/list links from the userland copy */
		bzero(&pr->rule.entries, sizeof(pr->rule.entries));
		pr->rule.kif = NULL;
		pr->rule.nat.kif = NULL;
		pr->rule.rdr.kif = NULL;
		pr->rule.route.kif = NULL;
		pr->rule.rcv_kif = NULL;
		pr->rule.anchor = NULL;
		pr->rule.overload_tbl = NULL;
		bzero(&pr->rule.gcle, sizeof(pr->rule.gcle));
		pr->rule.ruleset = NULL;
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		pf_addr_copyout(&pr->rule.rdr.addr);
		pf_addr_copyout(&pr->rule.nat.addr);
		pf_addr_copyout(&pr->rule.route.addr);
		/* translate skip-step pointers into rule numbers */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = (u_int32_t)-1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			rule->states_tot = 0;
		}
		break;
	}
1287 
1288 	case DIOCCHANGERULE: {
1289 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1290 		struct pf_ruleset	*ruleset;
1291 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1292 		u_int32_t		 nr = 0;
1293 
1294 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1295 		    pcr->action > PF_CHANGE_GET_TICKET) {
1296 			error = EINVAL;
1297 			break;
1298 		}
1299 		ruleset = pf_find_ruleset(pcr->anchor);
1300 		if (ruleset == NULL) {
1301 			error = EINVAL;
1302 			break;
1303 		}
1304 
1305 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1306 			pcr->ticket = ++ruleset->rules.active.ticket;
1307 			break;
1308 		} else {
1309 			if (pcr->ticket !=
1310 			    ruleset->rules.active.ticket) {
1311 				error = EINVAL;
1312 				break;
1313 			}
1314 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1315 				error = EINVAL;
1316 				break;
1317 			}
1318 		}
1319 
1320 		if (pcr->action != PF_CHANGE_REMOVE) {
1321 			newrule = pool_get(&pf_rule_pl,
1322 			    PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1323 			if (newrule == NULL) {
1324 				error = ENOMEM;
1325 				break;
1326 			}
1327 			pf_rule_copyin(&pcr->rule, newrule, ruleset);
1328 			newrule->cuid = p->p_ucred->cr_ruid;
1329 			newrule->cpid = p->p_p->ps_pid;
1330 
1331 			switch (newrule->af) {
1332 			case 0:
1333 				break;
1334 			case AF_INET:
1335 				break;
1336 #ifdef INET6
1337 			case AF_INET6:
1338 				break;
1339 #endif /* INET6 */
1340 			default:
1341 				pool_put(&pf_rule_pl, newrule);
1342 				error = EAFNOSUPPORT;
1343 				goto fail;
1344 			}
1345 
1346 			if (newrule->rt && !newrule->direction)
1347 				error = EINVAL;
1348 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1349 				error = EINVAL;
1350 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1351 				error = EINVAL;
1352 			if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
1353 				error = EINVAL;
1354 			if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
1355 				error = EINVAL;
1356 			if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
1357 				error = EINVAL;
1358 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1359 				error = EINVAL;
1360 
1361 			if (error) {
1362 				pf_rm_rule(NULL, newrule);
1363 				break;
1364 			}
1365 		}
1366 
1367 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1368 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1369 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1370 			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1371 			    pf_rulequeue);
1372 		else {
1373 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1374 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1375 				oldrule = TAILQ_NEXT(oldrule, entries);
1376 			if (oldrule == NULL) {
1377 				if (newrule != NULL)
1378 					pf_rm_rule(NULL, newrule);
1379 				error = EINVAL;
1380 				break;
1381 			}
1382 		}
1383 
1384 		if (pcr->action == PF_CHANGE_REMOVE) {
1385 			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1386 			ruleset->rules.active.rcount--;
1387 		} else {
1388 			if (oldrule == NULL)
1389 				TAILQ_INSERT_TAIL(
1390 				    ruleset->rules.active.ptr,
1391 				    newrule, entries);
1392 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1393 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1394 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1395 			else
1396 				TAILQ_INSERT_AFTER(
1397 				    ruleset->rules.active.ptr,
1398 				    oldrule, newrule, entries);
1399 			ruleset->rules.active.rcount++;
1400 		}
1401 
1402 		nr = 0;
1403 		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1404 			oldrule->nr = nr++;
1405 
1406 		ruleset->rules.active.ticket++;
1407 
1408 		pf_calc_skip_steps(ruleset->rules.active.ptr);
1409 		pf_remove_if_empty_ruleset(ruleset);
1410 
1411 		break;
1412 	}
1413 
	/*
	 * DIOCCLRSTATES: remove all states, or only those on the
	 * interface named in psk_ifname.  Individual pfsync delete
	 * messages are suppressed; a single bulk clear is sent instead.
	 */
	case DIOCCLRSTATES: {
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 killed = 0;

		/* fetch the successor first; pf_remove_state unlinks s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC > 0
				/* don't send out individual delete messages */
				SET(s->state_flags, PFSTATE_NOSYNC);
#endif	/* NPFSYNC > 0 */
				pf_remove_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
#if NPFSYNC > 0
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif	/* NPFSYNC > 0 */
		break;
	}
1438 
	/*
	 * DIOCKILLSTATES: kill states matching the given criteria.
	 * If a state id is supplied, kill exactly that state; otherwise
	 * walk the whole id tree and match on af, proto, rdomain,
	 * src/dst address+port, rule label and interface name.
	 */
	case DIOCKILLSTATES: {
		struct pf_state		*s, *nexts;
		struct pf_state_key	*sk;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 killed = 0;

		/* exact kill by (id, creatorid)? */
		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = pf_status.hostid;
			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
				pf_remove_state(s);
				psk->psk_killed = 1;
			}
			break;
		}

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			/*
			 * Pick the key whose address order matches the
			 * caller's view: outbound states are matched on
			 * the stack-side key with src/dst swapped.
			 */
			if (s->direction == PF_OUT) {
				sk = s->key[PF_SK_STACK];
				srcaddr = &sk->addr[1];
				dstaddr = &sk->addr[0];
				srcport = sk->port[1];
				dstport = sk->port[0];
			} else {
				sk = s->key[PF_SK_WIRE];
				srcaddr = &sk->addr[0];
				dstaddr = &sk->addr[1];
				srcport = sk->port[0];
				dstport = sk->port[1];
			}
			/* unset criteria (0 / empty string) match anything */
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    srcaddr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    dstaddr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    srcport)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dstport)) &&
			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
				pf_remove_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
		break;
	}
1504 
#if NPFSYNC > 0
	/*
	 * DIOCADDSTATE: inject a state from userland, reusing the
	 * pfsync import path.  Only compiled in when pfsync is present.
	 */
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
		break;
	}
#endif	/* NPFSYNC > 0 */

	/*
	 * DIOCGETSTATE: export a single state identified by
	 * (id, creatorid).
	 */
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		bzero(&id_key, sizeof(id_key));
		id_key.id = ps->state.id;
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s);
		break;
	}
1537 
	/*
	 * DIOCGETSTATES: copy out as many states as fit into the
	 * caller's buffer.  With ps_len == 0 only the required buffer
	 * size is reported.  States are exported one at a time through
	 * a kernel staging buffer (pstore).
	 */
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			/* size probe: tell userland how much to allocate */
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already detached from the tables */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop once the userland buffer is full */
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;
				pf_state_export(pstore, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP, sizeof(*pstore));
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report how many bytes were actually written */
		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP, sizeof(*pstore));
		break;
	}
1576 
	/*
	 * DIOCGETSTATUS: copy out the global pf status, augmented with
	 * per-interface counters for the status interface.
	 */
	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		bcopy(&pf_status, s, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		break;
	}

	/*
	 * DIOCSETSTATUSIF: stage the status interface name for the
	 * running transaction (applied at commit).  An empty name
	 * clears the current status interface immediately.
	 */
	case DIOCSETSTATUSIF: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		if (pi->pfiio_name[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
		pf_trans_set.mask |= PF_TSET_STATUSIF;
		break;
	}

	/*
	 * DIOCCLRSTATUS: reset the global counters (or only one
	 * interface's counters when a name is given) and restart the
	 * "since" timestamp.
	 */
	case DIOCCLRSTATUS: {
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		/* if ifname is specified, clear counters there only */
		if (pi->pfiio_name[0]) {
			pfi_update_status(pi->pfiio_name, NULL);
			break;
		}

		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = time_second;

		break;
	}
1612 
	/*
	 * DIOCNATLOOK: given a connection 5-tuple (plus rdomain), look
	 * up the matching state and return the translated addresses,
	 * ports and routing domain.  Fails with E2BIG if the lookup is
	 * ambiguous (more than one matching state).
	 */
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/* proto, both addresses and (for TCP/UDP) both ports
		 * are mandatory; rdomain must be in range */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)) ||
		    pnl->rdomain > RT_TABLEID_MAX)
			error = EINVAL;
		else {
			key.af = pnl->af;
			key.proto = pnl->proto;
			key.rdomain = pnl->rdomain;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
				pnl->rrdomain = sk->rdomain;
			} else
				error = ENOENT;
		}
		break;
	}
1658 
	/*
	 * DIOCSETTIMEOUT: stage a new default timeout value; it takes
	 * effect at DIOCXCOMMIT.  The previous (still active) value is
	 * returned in pt->seconds.
	 */
	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		/* a zero purge interval would stall the purge thread */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	/* DIOCGETTIMEOUT: read one active default timeout value. */
	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	/* DIOCGETLIMIT: read one pool hard limit. */
	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	/*
	 * DIOCSETLIMIT: stage a new pool hard limit (applied at
	 * commit).  Rejected if more items are currently outstanding
	 * than the new limit would allow.
	 */
	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
		    pl->limit) {
			error = EBUSY;
			goto fail;
		}
		/* Fragments reference mbuf clusters. */
		if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
			error = EINVAL;
			goto fail;
		}

		pf_pool_limits[pl->index].limit_new = pl->limit;
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	/* DIOCSETDEBUG: stage a new debug level (applied at commit). */
	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		pf_trans_set.debug = *level;
		pf_trans_set.mask |= PF_TSET_DEBUG;
		break;
	}
1727 
	/* DIOCCLRRULECTRS: clear counters on every main-ruleset rule. */
	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules.active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	/*
	 * DIOCGETRULESETS: count the child anchors directly below the
	 * given anchor path.
	 */
	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}

	/*
	 * DIOCGETRULESET: return the name of the pr->nr'th child
	 * anchor below the given anchor path (EBUSY when out of range).
	 */
	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
1799 
	/*
	 * DIOCR* table and table-address ioctls.  Each one first checks
	 * that userland's idea of the element size (pfrio_esize) matches
	 * the kernel's struct (guards against ABI mismatch, ENODEV
	 * otherwise), then dispatches to the corresponding pfr_*()
	 * routine with PFR_FLAG_USERIOCTL added to the caller's flags.
	 */
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
1998 	}
1999 
	/* DIOCOSFPADD/DIOCOSFPGET: OS fingerprint table maintenance,
	 * handled entirely by pf_osfp.c. */
	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}
2011 
	/*
	 * DIOCXBEGIN: open a transaction.  For every element in the
	 * caller's array a new inactive table or ruleset is begun and
	 * the resulting ticket is copied back out.  Staged global
	 * settings (pf_default_rule_new, pf_trans_set) are reset here.
	 */
	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		pf_default_rule_new = pf_default_rule;
		bzero(&pf_trans_set, sizeof(pf_trans_set));
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->anchor))) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail;
				}
				break;
			}
			/* hand the new ticket back to userland */
			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				goto fail;
			}
		}
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		break;
	}
2065 
	/*
	 * DIOCXROLLBACK: abort a transaction, discarding every inactive
	 * table or ruleset named in the caller's array.
	 */
	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, sizeof(*table));
				free(ioe, M_TEMP, sizeof(*ioe));
				error = EFAULT;
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->anchor))) {
					free(table, M_TEMP, sizeof(*table));
					free(ioe, M_TEMP, sizeof(*ioe));
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP, sizeof(*table));
		free(ioe, M_TEMP, sizeof(*ioe));
		break;
	}
2111 
2112 	case DIOCXCOMMIT: {
2113 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2114 		struct pfioc_trans_e	*ioe;
2115 		struct pfr_table	*table;
2116 		struct pf_ruleset	*rs;
2117 		int			 i;
2118 
2119 		if (io->esize != sizeof(*ioe)) {
2120 			error = ENODEV;
2121 			goto fail;
2122 		}
2123 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2124 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2125 		/* first makes sure everything will succeed */
2126 		for (i = 0; i < io->size; i++) {
2127 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2128 				free(table, M_TEMP, sizeof(*table));
2129 				free(ioe, M_TEMP, sizeof(*ioe));
2130 				error = EFAULT;
2131 				goto fail;
2132 			}
2133 			switch (ioe->type) {
2134 			case PF_TRANS_TABLE:
2135 				rs = pf_find_ruleset(ioe->anchor);
2136 				if (rs == NULL || !rs->topen || ioe->ticket !=
2137 				     rs->tticket) {
2138 					free(table, M_TEMP, sizeof(*table));
2139 					free(ioe, M_TEMP, sizeof(*ioe));
2140 					error = EBUSY;
2141 					goto fail;
2142 				}
2143 				break;
2144 			default:
2145 				rs = pf_find_ruleset(ioe->anchor);
2146 				if (rs == NULL ||
2147 				    !rs->rules.inactive.open ||
2148 				    rs->rules.inactive.ticket !=
2149 				    ioe->ticket) {
2150 					free(table, M_TEMP, sizeof(*table));
2151 					free(ioe, M_TEMP, sizeof(*ioe));
2152 					error = EBUSY;
2153 					goto fail;
2154 				}
2155 				break;
2156 			}
2157 		}
2158 
2159 		/*
2160 		 * Checked already in DIOCSETLIMIT, but check again as the
2161 		 * situation might have changed.
2162 		 */
2163 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2164 			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2165 			    pf_pool_limits[i].limit_new) {
2166 				free(table, M_TEMP, sizeof(*table));
2167 				free(ioe, M_TEMP, sizeof(*ioe));
2168 				error = EBUSY;
2169 				goto fail;
2170 			}
2171 		}
2172 		/* now do the commit - no errors should happen here */
2173 		for (i = 0; i < io->size; i++) {
2174 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2175 				free(table, M_TEMP, sizeof(*table));
2176 				free(ioe, M_TEMP, sizeof(*ioe));
2177 				error = EFAULT;
2178 				goto fail;
2179 			}
2180 			switch (ioe->type) {
2181 			case PF_TRANS_TABLE:
2182 				bzero(table, sizeof(*table));
2183 				strlcpy(table->pfrt_anchor, ioe->anchor,
2184 				    sizeof(table->pfrt_anchor));
2185 				if ((error = pfr_ina_commit(table, ioe->ticket,
2186 				    NULL, NULL, 0))) {
2187 					free(table, M_TEMP, sizeof(*table));
2188 					free(ioe, M_TEMP, sizeof(*ioe));
2189 					goto fail; /* really bad */
2190 				}
2191 				break;
2192 			default:
2193 				if ((error = pf_commit_rules(ioe->ticket,
2194 				    ioe->anchor))) {
2195 					free(table, M_TEMP, sizeof(*table));
2196 					free(ioe, M_TEMP, sizeof(*ioe));
2197 					goto fail; /* really bad */
2198 				}
2199 				break;
2200 			}
2201 		}
2202 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2203 			if (pf_pool_limits[i].limit_new !=
2204 			    pf_pool_limits[i].limit &&
2205 			    pool_sethardlimit(pf_pool_limits[i].pp,
2206 			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2207 				free(table, M_TEMP, sizeof(*table));
2208 				free(ioe, M_TEMP, sizeof(*ioe));
2209 				error = EBUSY;
2210 				goto fail; /* really bad */
2211 			}
2212 			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2213 		}
2214 		for (i = 0; i < PFTM_MAX; i++) {
2215 			int old = pf_default_rule.timeout[i];
2216 
2217 			pf_default_rule.timeout[i] =
2218 			    pf_default_rule_new.timeout[i];
2219 			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2220 			    pf_default_rule.timeout[i] < old)
2221 				wakeup(pf_purge_thread);
2222 		}
2223 		pfi_xcommit();
2224 		pf_trans_set_commit();
2225 		free(table, M_TEMP, sizeof(*table));
2226 		free(ioe, M_TEMP, sizeof(*ioe));
2227 		break;
2228 	}
2229 
2230 	case DIOCGETSRCNODES: {
2231 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2232 		struct pf_src_node	*n, *p, *pstore;
2233 		u_int32_t		 nr = 0;
2234 		int			 space = psn->psn_len;
2235 
2236 		if (space == 0) {
2237 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2238 				nr++;
2239 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2240 			break;
2241 		}
2242 
2243 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2244 
2245 		p = psn->psn_src_nodes;
2246 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2247 			int	secs = time_uptime, diff;
2248 
2249 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2250 				break;
2251 
2252 			bcopy(n, pstore, sizeof(*pstore));
2253 			bzero(&pstore->entry, sizeof(pstore->entry));
2254 			pstore->rule.ptr = NULL;
2255 			pstore->kif = NULL;
2256 			pstore->rule.nr = n->rule.ptr->nr;
2257 			pstore->creation = secs - pstore->creation;
2258 			if (pstore->expire > secs)
2259 				pstore->expire -= secs;
2260 			else
2261 				pstore->expire = 0;
2262 
2263 			/* adjust the connection rate estimate */
2264 			diff = secs - n->conn_rate.last;
2265 			if (diff >= n->conn_rate.seconds)
2266 				pstore->conn_rate.count = 0;
2267 			else
2268 				pstore->conn_rate.count -=
2269 				    n->conn_rate.count * diff /
2270 				    n->conn_rate.seconds;
2271 
2272 			error = copyout(pstore, p, sizeof(*p));
2273 			if (error) {
2274 				free(pstore, M_TEMP, sizeof(*pstore));
2275 				goto fail;
2276 			}
2277 			p++;
2278 			nr++;
2279 		}
2280 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2281 
2282 		free(pstore, M_TEMP, sizeof(*pstore));
2283 		break;
2284 	}
2285 
2286 	case DIOCCLRSRCNODES: {
2287 		struct pf_src_node	*n;
2288 		struct pf_state		*state;
2289 
2290 		RB_FOREACH(state, pf_state_tree_id, &tree_id)
2291 			pf_src_tree_remove_state(state);
2292 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2293 			n->expire = 1;
2294 		pf_purge_expired_src_nodes(1);
2295 		break;
2296 	}
2297 
2298 	case DIOCKILLSRCNODES: {
2299 		struct pf_src_node	*sn;
2300 		struct pf_state		*s;
2301 		struct pfioc_src_node_kill *psnk =
2302 		    (struct pfioc_src_node_kill *)addr;
2303 		u_int			killed = 0;
2304 
2305 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2306 			if (PF_MATCHA(psnk->psnk_src.neg,
2307 				&psnk->psnk_src.addr.v.a.addr,
2308 				&psnk->psnk_src.addr.v.a.mask,
2309 				&sn->addr, sn->af) &&
2310 			    PF_MATCHA(psnk->psnk_dst.neg,
2311 				&psnk->psnk_dst.addr.v.a.addr,
2312 				&psnk->psnk_dst.addr.v.a.mask,
2313 				&sn->raddr, sn->af)) {
2314 				/* Handle state to src_node linkage */
2315 				if (sn->states != 0)
2316 					RB_FOREACH(s, pf_state_tree_id,
2317 					   &tree_id)
2318 						pf_state_rm_src_node(s, sn);
2319 				sn->expire = 1;
2320 				killed++;
2321 			}
2322 		}
2323 
2324 		if (killed > 0)
2325 			pf_purge_expired_src_nodes(1);
2326 
2327 		psnk->psnk_killed = killed;
2328 		break;
2329 	}
2330 
2331 	case DIOCSETHOSTID: {
2332 		u_int32_t	*hostid = (u_int32_t *)addr;
2333 
2334 		if (*hostid == 0)
2335 			pf_trans_set.hostid = arc4random();
2336 		else
2337 			pf_trans_set.hostid = *hostid;
2338 		pf_trans_set.mask |= PF_TSET_HOSTID;
2339 		break;
2340 	}
2341 
2342 	case DIOCOSFPFLUSH:
2343 		pf_osfp_flush();
2344 		break;
2345 
2346 	case DIOCIGETIFACES: {
2347 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2348 
2349 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2350 			error = ENODEV;
2351 			break;
2352 		}
2353 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2354 		    &io->pfiio_size);
2355 		break;
2356 	}
2357 
2358 	case DIOCSETIFFLAG: {
2359 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2360 
2361 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2362 		break;
2363 	}
2364 
2365 	case DIOCCLRIFFLAG: {
2366 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2367 
2368 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2369 		break;
2370 	}
2371 
2372 	case DIOCSETREASS: {
2373 		u_int32_t	*reass = (u_int32_t *)addr;
2374 
2375 		pf_trans_set.reass = *reass;
2376 		pf_trans_set.mask |= PF_TSET_REASS;
2377 		break;
2378 	}
2379 
2380 	default:
2381 		error = ENODEV;
2382 		break;
2383 	}
2384 fail:
2385 	NET_UNLOCK(s);
2386 	return (error);
2387 }
2388 
2389 void
2390 pf_trans_set_commit(void)
2391 {
2392 	if (pf_trans_set.mask & PF_TSET_STATUSIF)
2393 		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2394 	if (pf_trans_set.mask & PF_TSET_DEBUG)
2395 		pf_status.debug = pf_trans_set.debug;
2396 	if (pf_trans_set.mask & PF_TSET_HOSTID)
2397 		pf_status.hostid = pf_trans_set.hostid;
2398 	if (pf_trans_set.mask & PF_TSET_REASS)
2399 		pf_status.reass = pf_trans_set.reass;
2400 }
2401 
2402 void
2403 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2404 {
2405 	bcopy(from, to, sizeof(*to));
2406 	to->kif = NULL;
2407 }
2408 
/*
 * Copy a rule handed in from userland into a kernel-resident rule,
 * resolving the names it carries (interfaces, overload table, routing
 * tables, queues, tags) into kernel references along the way.
 *
 * Returns 0 on success, EINVAL when an interface or table lookup
 * fails, EBUSY when a routing table, queue or tag cannot be resolved.
 * NOTE(review): on failure the rule may already hold references
 * (kifs, overload table, tags) acquired before the failing step;
 * presumably the caller releases them -- verify at the call sites.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	to->src = from->src;
	to->dst = from->dst;

	/* XXX union skip[] */

	/* Copy all name strings with explicit bounds. */
	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* Copy the address pools; their kif pointers are cleared. */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	/* Resolve the interface names into kif references. */
	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);
	/* An overload table name attaches (and activates) the table. */
	if (to->overload_tblname[0]) {
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	/* Same for the interfaces named in the pools. */
	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	/* Routing table ids must refer to existing tables. */
	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain))
		return (EBUSY);
	if (to->onrdomain >= 0)		/* make sure it is a real rdomain */
		to->onrdomain = rtable_l2(to->onrdomain);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;

	/*
	 * Resolve queue names to ids; with no separate priority queue
	 * name, pqid falls back to qid.
	 */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	/* Map tag names to tag values (second arg 1: presumably
	 * allocates the tag if it does not exist yet -- verify). */
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* Without pflog compiled in and logging enabled, no log interface. */
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert_packet.addr = from->divert_packet.addr;
	to->divert_packet.port = from->divert_packet.port;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}
2538