xref: /openbsd-src/sys/net/pf_ioctl.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: pf_ioctl.c,v 1.273 2014/07/12 18:44:22 tedu Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002 - 2013 Henning Brauer <henning@openbsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/kernel.h>
49 #include <sys/time.h>
50 #include <sys/timeout.h>
51 #include <sys/pool.h>
52 #include <sys/malloc.h>
53 #include <sys/kthread.h>
54 #include <sys/rwlock.h>
55 #include <sys/syslog.h>
56 #include <uvm/uvm_extern.h>
57 
58 #include <net/if.h>
59 #include <net/if_types.h>
60 #include <net/route.h>
61 
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/ip.h>
65 #include <netinet/ip_var.h>
66 #include <netinet/ip_icmp.h>
67 
68 #include <dev/rndvar.h>
69 #include <crypto/md5.h>
70 #include <net/pfvar.h>
71 
72 #if NPFSYNC > 0
73 #include <net/if_pfsync.h>
74 #endif /* NPFSYNC > 0 */
75 
76 #if NPFLOG > 0
77 #include <net/if_pflog.h>
78 #endif /* NPFLOG > 0 */
79 
80 #ifdef INET6
81 #include <netinet/ip6.h>
82 #include <netinet/in_pcb.h>
83 #endif /* INET6 */
84 
/* Prototypes for functions private to this file. */
void			 pfattach(int);
void			 pf_thread_create(void *);
int			 pfopen(dev_t, int, int, struct proc *);
int			 pfclose(dev_t, int, int, struct proc *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
int			 pf_begin_rules(u_int32_t *, const char *);
int			 pf_rollback_rules(u_int32_t, char *);
int			 pf_create_queues(void);
int			 pf_commit_queues(void);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, char *);
int			 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
int			 pf_kif_setup(char *, struct pfi_kif **);
void			 pf_addr_copyout(struct pf_addr_wrap *);
void			 pf_trans_set_commit(void);
void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
			    struct pf_ruleset *);
u_int16_t		 pf_qname2qid(char *, int);
void			 pf_qid2qname(u_int16_t, char *);
void			 pf_qid_unref(u_int16_t);

/*
 * The always-present default rule, plus the staged copy used while a
 * transaction is open.  pf_consistency_lock serializes ioctl access.
 */
struct pf_rule		 pf_default_rule, pf_default_rule_new;
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");

/*
 * Global option changes staged during a transaction; "mask" records
 * which PF_TSET_* fields were set.  Applied by pf_trans_set_commit().
 */
struct {
	char		statusif[IFNAMSIZ];
	u_int32_t	debug;
	u_int32_t	hostid;
	u_int32_t	reass;
	u_int32_t	mask;
} pf_trans_set;

#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

/* Highest tag/queue id ever handed out by tagname2tag(). */
#define	TAGID_MAX	 50000
/* Tags and queue ids share the same name<->number mapping machinery. */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
139 
140 
/*
 * One-time pf subsystem initialization.  Sets up the memory pools,
 * the default rule, default timeouts and pf_status, then defers
 * creation of the state-purge kthread until process context exists.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	/* One pool per pf object type. */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrule",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0, 0, 0,
	    "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstate",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
	    "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 0, 0,
	    "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0, 0, 0,
	    "pfqueue", NULL);
	/* hfsc pools may sleep for memory (PR_WAITOK). */
	pool_init(&hfsc_class_pl, sizeof(struct hfsc_class), 0, 0, PR_WAITOK,
	    "hfscclass", NULL);
	pool_init(&hfsc_classq_pl, sizeof(struct hfsc_classq), 0, 0, PR_WAITOK,
	    "hfscclassq", NULL);
	pool_init(&hfsc_internal_sc_pl, sizeof(struct hfsc_internal_sc), 0, 0,
	    PR_WAITOK, "hfscintsc", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	/* Cap state allocation at the configured hard limit. */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* Shrink the table-entry limit on machines with <= 100MB RAM. */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* Two queue lists: the active set and the one staged by a transaction. */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type =  PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type =  PF_ADDR_NONE;
	pf_default_rule.nat.addr.type =  PF_ADDR_NONE;
	pf_default_rule.route.addr.type =  PF_ADDR_NONE;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
	kthread_create_deferred(pf_thread_create, NULL);
}
233 
/*
 * Deferred-kthread callback: create the state purge thread once
 * process context is available.  "v" is unused.
 */
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}
240 
241 int
242 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
243 {
244 	if (minor(dev) >= 1)
245 		return (ENXIO);
246 	return (0);
247 }
248 
249 int
250 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
251 {
252 	if (minor(dev) >= 1)
253 		return (ENXIO);
254 	return (0);
255 }
256 
/*
 * Unlink "rule" from "rulequeue" (when non-NULL) and destroy it once
 * no states or source nodes reference it.  A rule still referenced is
 * only unlinked here; it is finally freed by a later call with
 * rulequeue == NULL once the last reference drops.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur <= 0 && rule->src_nodes <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		/* tqe_prev == NULL marks the rule as already unlinked. */
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Still referenced, or unlinked above: defer the actual free. */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	/* Table refs were not dropped above when rulequeue was NULL. */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pool_put(&pf_rule_pl, rule);
}
309 
/*
 * Remove a single rule from an anchor's active ruleset, renumber the
 * remaining rules, bump the ticket and recompute skip steps.  Only
 * rulesets that belong to an anchor are handled (main ruleset rules
 * are never purged this way).
 */
void
pf_purge_rule(struct pf_ruleset *ruleset, struct pf_rule *rule)
{
	u_int32_t		 nr;

	if (ruleset == NULL || ruleset->anchor == NULL)
		return;

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;

	/* Renumber survivors so rule->nr stays dense. */
	nr = 0;
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;

	ruleset->rules.active.ticket++;

	pf_calc_skip_steps(ruleset->rules.active.ptr);
	pf_remove_if_empty_ruleset(ruleset);
}
330 
/*
 * Map "tagname" to its numeric tag, taking a reference.  When the
 * name is unknown and "create" is set, allocate a new entry; the list
 * is kept ordered by tag id so the smallest unused id can be found by
 * a linear scan.  Returns 0 on failure (not found and !create, id
 * space exhausted, or allocation failure).
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_TEMP, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
376 
377 void
378 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
379 {
380 	struct pf_tagname	*tag;
381 
382 	TAILQ_FOREACH(tag, head, entries)
383 		if (tag->tag == tagid) {
384 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
385 			return;
386 		}
387 }
388 
389 void
390 tag_unref(struct pf_tags *head, u_int16_t tag)
391 {
392 	struct pf_tagname	*p, *next;
393 
394 	if (tag == 0)
395 		return;
396 
397 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
398 		next = TAILQ_NEXT(p, entries);
399 		if (tag == p->tag) {
400 			if (--p->ref == 0) {
401 				TAILQ_REMOVE(head, p, entries);
402 				free(p, M_TEMP, 0);
403 			}
404 			break;
405 		}
406 	}
407 }
408 
/* Wrapper: look up (or create) a packet tag in the global tag list. */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}
414 
/* Wrapper: resolve a packet tag id back to its name. */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
420 
421 void
422 pf_tag_ref(u_int16_t tag)
423 {
424 	struct pf_tagname *t;
425 
426 	TAILQ_FOREACH(t, &pf_tags, entries)
427 		if (t->tag == tag)
428 			break;
429 	if (t != NULL)
430 		t->ref++;
431 }
432 
/* Wrapper: drop one reference on a packet tag. */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
438 
439 int
440 pf_rtlabel_add(struct pf_addr_wrap *a)
441 {
442 	if (a->type == PF_ADDR_RTLABEL &&
443 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
444 		return (-1);
445 	return (0);
446 }
447 
448 void
449 pf_rtlabel_remove(struct pf_addr_wrap *a)
450 {
451 	if (a->type == PF_ADDR_RTLABEL)
452 		rtlabel_unref(a->v.rtlabel);
453 }
454 
455 void
456 pf_rtlabel_copyout(struct pf_addr_wrap *a)
457 {
458 	const char	*name;
459 
460 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
461 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
462 			strlcpy(a->v.rtlabelname, "?",
463 			    sizeof(a->v.rtlabelname));
464 		else
465 			strlcpy(a->v.rtlabelname, name,
466 			    sizeof(a->v.rtlabelname));
467 	}
468 }
469 
/* Wrapper: look up (or create) a queue id by queue name. */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}
475 
/* Wrapper: resolve a queue id back to its name. */
void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}
481 
482 void
483 pf_qid_unref(u_int16_t qid)
484 {
485 	tag_unref(&pf_qids, (u_int16_t)qid);
486 }
487 
/*
 * Open a rule transaction on "anchor": flush any rules left on the
 * inactive list and hand back a fresh ticket that must accompany the
 * subsequent add/commit calls.  Returns EINVAL when the ruleset can
 * neither be found nor created.
 */
int
pf_begin_rules(u_int32_t *ticket, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	*ticket = ++rs->rules.inactive.ticket;
	rs->rules.inactive.open = 1;
	return (0);
}
504 
/*
 * Abort an open rule transaction: discard the inactive ruleset when
 * the ticket matches.  A stale ticket or missing ruleset is not an
 * error.  For the main ruleset this also throws away any queue
 * definitions staged on the inactive queue list.
 */
int
pf_rollback_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    rs->rules.inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	rs->rules.inactive.open = 0;

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_free_queues(pf_queues_inactive, NULL));
}
526 
/*
 * Free every queuespec on "where"; when "ifp" is non-NULL only queues
 * bound to that interface are released.  Always returns 0.
 */
int
pf_free_queues(struct pf_queuehead *where, struct ifnet *ifp)
{
	struct pf_queuespec	*q, *qtmp;

	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
		if (ifp && q->kif->pfik_ifp != ifp)
			continue;
		TAILQ_REMOVE(where, q, entries);
		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
		pool_put(&pf_queue_pl, q);
	}
	return (0);
}
541 
/*
 * Tear down the hfsc queues on the active list (or only those bound
 * to "ifp" when non-NULL).  The list is walked in reverse so children
 * are deleted before their parents (queues are appended after the
 * parents they reference), then root queues detach hfsc from their
 * interface.  Returns the first hfsc error, or 0.
 */
int
pf_remove_queues(struct ifnet *ifp)
{
	struct pf_queuespec	*q;
	int			 error = 0;

	/* remove queues */
	TAILQ_FOREACH_REVERSE(q, pf_queues_active, pf_queuehead, entries) {
		if (ifp && q->kif->pfik_ifp != ifp)
			continue;
		if ((error = hfsc_delqueue(q)) != 0)
			return (error);
	}

	/* put back interfaces in normal queueing mode */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (ifp && q->kif->pfik_ifp != ifp)
			continue;
		if (q->parent_qid == 0)
			if ((error = hfsc_detach(q->kif->pfik_ifp)) != 0)
				return (error);
	}

	return (0);
}
567 
/*
 * Instantiate the active queue list in hfsc: first attach hfsc to the
 * interfaces carrying root queues (parent_qid == 0), then add every
 * queue.  Returns the first hfsc error, or 0.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	int			 error = 0;

	/* find root queues and attach hfsc to these interfaces */
	TAILQ_FOREACH(q, pf_queues_active, entries)
		if (q->parent_qid == 0)
			if ((error = hfsc_attach(q->kif->pfik_ifp)) != 0)
				return (error);

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries)
		if ((error = hfsc_addqueue(q)) != 0)
			return (error);

	return (0);
}
587 
/*
 * Commit a queue transaction: remove the currently instantiated
 * queues, swap the active and inactive lists, free the now-inactive
 * (old) definitions and instantiate the new active set.
 */
int
pf_commit_queues(void)
{
	struct pf_queuehead	*qswap;
	int error;

	if ((error = pf_remove_queues(NULL)) != 0)
		return (error);

	/* swap */
	qswap = pf_queues_active;
	pf_queues_active = pf_queues_inactive;
	pf_queues_inactive = qswap;
	pf_free_queues(pf_queues_inactive, NULL);

	return (pf_create_queues());
}
605 
/*
 * Helpers to fold rule fields into the MD5 ruleset checksum (used by
 * pf_setup_pfsync_matching()).  The HTONL/HTONS variants hash values
 * in network byte order so the checksum is endian-independent; "ctx"
 * must be an MD5_CTX * in scope at the expansion site.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
621 
/*
 * Fold one rule address (src or dst) into the ruleset checksum.  Only
 * the fields relevant to the address type are hashed.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
		case PF_ADDR_RTLABEL:
			PF_MD5_UPD(pfr, addr.v.rtlabelname);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
649 
/*
 * Fold an entire rule into the ruleset checksum.  Multi-byte numeric
 * fields go through the HTONL/HTONS scratch variables (x, y) so the
 * digest matches across byte orders.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
689 
/*
 * Commit a rule transaction: swap the staged (inactive) ruleset in as
 * the active one and purge the previous rules.  The swap and purge
 * happen at splsoftnet so packet processing never sees a half-swapped
 * ruleset.  Requires the ticket issued by pf_begin_rules(); returns
 * EBUSY on a stale ticket.  For the main ruleset the pfsync checksum
 * is recomputed first, and the queue transaction committed afterwards.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;
	old_array = rs->rules.active.ptr_array;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.ptr_array = old_array;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}
744 
/*
 * Compute the MD5 checksum over the staged ruleset and store it in
 * pf_status.pf_chksum, building the rule-number -> rule pointer array
 * along the way.  Returns ENOMEM if the array cannot be allocated.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;

	if (rs->rules.inactive.rcount) {
		rs->rules.inactive.ptr_array = malloc(sizeof(caddr_t) *
		    rs->rules.inactive.rcount,  M_TEMP, M_NOWAIT);

		if (!rs->rules.inactive.ptr_array)
			return (ENOMEM);

		/* Hash every rule and index it by its rule number. */
		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules.inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}
774 
775 int
776 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
777     sa_family_t af)
778 {
779 	if (pfi_dynaddr_setup(addr, af) ||
780 	    pf_tbladdr_setup(ruleset, addr) ||
781 	    pf_rtlabel_add(addr))
782 		return (EINVAL);
783 
784 	return (0);
785 }
786 
787 int
788 pf_kif_setup(char *ifname, struct pfi_kif **kif)
789 {
790 	if (ifname[0]) {
791 		*kif = pfi_kif_get(ifname);
792 		if (*kif == NULL)
793 			return (EINVAL);
794 
795 		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
796 	} else
797 		*kif = NULL;
798 
799 	return (0);
800 }
801 
/*
 * Convert the kernel-internal parts of an address wrap (dynaddr,
 * table, route label) back to their userland representations before
 * copyout.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
809 
810 int
811 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
812 {
813 	int			 s;
814 	int			 error = 0;
815 
816 	/* XXX keep in sync with switch() below */
817 	if (securelevel > 1)
818 		switch (cmd) {
819 		case DIOCGETRULES:
820 		case DIOCGETRULE:
821 		case DIOCGETSTATE:
822 		case DIOCSETSTATUSIF:
823 		case DIOCGETSTATUS:
824 		case DIOCCLRSTATUS:
825 		case DIOCNATLOOK:
826 		case DIOCSETDEBUG:
827 		case DIOCGETSTATES:
828 		case DIOCGETTIMEOUT:
829 		case DIOCCLRRULECTRS:
830 		case DIOCGETLIMIT:
831 		case DIOCGETRULESETS:
832 		case DIOCGETRULESET:
833 		case DIOCGETQUEUES:
834 		case DIOCGETQUEUE:
835 		case DIOCGETQSTATS:
836 		case DIOCRGETTABLES:
837 		case DIOCRGETTSTATS:
838 		case DIOCRCLRTSTATS:
839 		case DIOCRCLRADDRS:
840 		case DIOCRADDADDRS:
841 		case DIOCRDELADDRS:
842 		case DIOCRSETADDRS:
843 		case DIOCRGETASTATS:
844 		case DIOCRCLRASTATS:
845 		case DIOCRTSTADDRS:
846 		case DIOCOSFPGET:
847 		case DIOCGETSRCNODES:
848 		case DIOCCLRSRCNODES:
849 		case DIOCIGETIFACES:
850 		case DIOCSETIFFLAG:
851 		case DIOCCLRIFFLAG:
852 			break;
853 		case DIOCRCLRTABLES:
854 		case DIOCRADDTABLES:
855 		case DIOCRDELTABLES:
856 		case DIOCRSETTFLAGS:
857 			if (((struct pfioc_table *)addr)->pfrio_flags &
858 			    PFR_FLAG_DUMMY)
859 				break; /* dummy operation ok */
860 			return (EPERM);
861 		default:
862 			return (EPERM);
863 		}
864 
865 	if (!(flags & FWRITE))
866 		switch (cmd) {
867 		case DIOCGETRULES:
868 		case DIOCGETSTATE:
869 		case DIOCGETSTATUS:
870 		case DIOCGETSTATES:
871 		case DIOCGETTIMEOUT:
872 		case DIOCGETLIMIT:
873 		case DIOCGETRULESETS:
874 		case DIOCGETRULESET:
875 		case DIOCGETQUEUES:
876 		case DIOCGETQUEUE:
877 		case DIOCGETQSTATS:
878 		case DIOCNATLOOK:
879 		case DIOCRGETTABLES:
880 		case DIOCRGETTSTATS:
881 		case DIOCRGETADDRS:
882 		case DIOCRGETASTATS:
883 		case DIOCRTSTADDRS:
884 		case DIOCOSFPGET:
885 		case DIOCGETSRCNODES:
886 		case DIOCIGETIFACES:
887 			break;
888 		case DIOCRCLRTABLES:
889 		case DIOCRADDTABLES:
890 		case DIOCRDELTABLES:
891 		case DIOCRCLRTSTATS:
892 		case DIOCRCLRADDRS:
893 		case DIOCRADDADDRS:
894 		case DIOCRDELADDRS:
895 		case DIOCRSETADDRS:
896 		case DIOCRSETTFLAGS:
897 			if (((struct pfioc_table *)addr)->pfrio_flags &
898 			    PFR_FLAG_DUMMY) {
899 				flags |= FWRITE; /* need write lock for dummy */
900 				break; /* dummy operation ok */
901 			}
902 			return (EACCES);
903 		case DIOCGETRULE:
904 			if (((struct pfioc_rule *)addr)->action ==
905 			    PF_GET_CLR_CNTR)
906 				return (EACCES);
907 			break;
908 		default:
909 			return (EACCES);
910 		}
911 
912 	if (flags & FWRITE)
913 		rw_enter_write(&pf_consistency_lock);
914 	else
915 		rw_enter_read(&pf_consistency_lock);
916 
917 	s = splsoftnet();
918 	switch (cmd) {
919 
920 	case DIOCSTART:
921 		if (pf_status.running)
922 			error = EEXIST;
923 		else {
924 			pf_status.running = 1;
925 			pf_status.since = time_second;
926 			if (pf_status.stateid == 0) {
927 				pf_status.stateid = time_second;
928 				pf_status.stateid = pf_status.stateid << 32;
929 			}
930 			pf_create_queues();
931 			DPFPRINTF(LOG_NOTICE, "pf: started");
932 		}
933 		break;
934 
935 	case DIOCSTOP:
936 		if (!pf_status.running)
937 			error = ENOENT;
938 		else {
939 			pf_status.running = 0;
940 			pf_status.since = time_second;
941 			pf_remove_queues(NULL);
942 			DPFPRINTF(LOG_NOTICE, "pf: stopped");
943 		}
944 		break;
945 
946 	case DIOCGETQUEUES: {
947 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
948 		struct pf_queuespec	*qs;
949 		u_int32_t		 nr = 0;
950 
951 		pq->ticket = pf_main_ruleset.rules.active.ticket;
952 
953 		/* save state to not run over them all each time? */
954 		qs = TAILQ_FIRST(pf_queues_active);
955 		while (qs != NULL) {
956 			qs = TAILQ_NEXT(qs, entries);
957 			nr++;
958 		}
959 		pq->nr = nr;
960 		break;
961 	}
962 
963 	case DIOCGETQUEUE: {
964 		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
965 		struct pf_queuespec	*qs;
966 		u_int32_t		 nr = 0;
967 
968 		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
969 			error = EBUSY;
970 			break;
971 		}
972 
973 		/* save state to not run over them all each time? */
974 		qs = TAILQ_FIRST(pf_queues_active);
975 		while ((qs != NULL) && (nr++ < pq->nr))
976 			qs = TAILQ_NEXT(qs, entries);
977 		if (qs == NULL) {
978 			error = EBUSY;
979 			break;
980 		}
981 		bcopy(qs, &pq->queue, sizeof(pq->queue));
982 		break;
983 	}
984 
985 	case DIOCGETQSTATS: {
986 		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
987 		struct pf_queuespec	*qs;
988 		u_int32_t		 nr;
989 		int			 nbytes;
990 
991 		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
992 			error = EBUSY;
993 			break;
994 		}
995 		nbytes = pq->nbytes;
996 		nr = 0;
997 
998 		/* save state to not run over them all each time? */
999 		qs = TAILQ_FIRST(pf_queues_active);
1000 		while ((qs != NULL) && (nr++ < pq->nr))
1001 			qs = TAILQ_NEXT(qs, entries);
1002 		if (qs == NULL) {
1003 			error = EBUSY;
1004 			break;
1005 		}
1006 		bcopy(qs, &pq->queue, sizeof(pq->queue));
1007 		error = hfsc_qstats(qs, pq->buf, &nbytes);
1008 		if (error == 0)
1009 			pq->nbytes = nbytes;
1010 		break;
1011 	}
1012 
1013 	case DIOCADDQUEUE: {
1014 		struct pfioc_queue	*q = (struct pfioc_queue *)addr;
1015 		struct pf_queuespec	*qs;
1016 
1017 		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
1018 			error = EBUSY;
1019 			break;
1020 		}
1021 		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1022 		if (qs == NULL) {
1023 			error = ENOMEM;
1024 			break;
1025 		}
1026 		bcopy(&q->queue, qs, sizeof(*qs));
1027 		qs->qid = pf_qname2qid(qs->qname, 1);
1028 		if (qs->parent[0] && (qs->parent_qid =
1029 		    pf_qname2qid(qs->parent, 0)) == 0)
1030 			return (ESRCH);
1031 		qs->kif = pfi_kif_get(qs->ifname);
1032 		if (!qs->kif->pfik_ifp) {
1033 			error = ESRCH;
1034 			break;
1035 		}
1036 		/* XXX resolve bw percentage specs */
1037 		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);
1038 		if (qs->qlimit == 0)
1039 			qs->qlimit = HFSC_DEFAULT_QLIMIT;
1040 		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
1041 
1042 		break;
1043 	}
1044 
	case DIOCADDRULE: {
		/*
		 * Append one rule to the tail of the inactive
		 * (under-construction) ruleset named by pr->anchor.
		 * The caller must hold the inactive ticket obtained
		 * from DIOCXBEGIN/pf_begin_rules.
		 */
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;

		/* force NUL termination on the user-supplied anchor path */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		/* high byte of return_icmp is the ICMP type; bound it */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules.inactive.ticket) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
			pool_put(&pf_rule_pl, rule);
			break;
		}
		/* record creator credentials for pfctl -vvsr output */
		rule->cuid = p->p_ucred->cr_ruid;
		rule->cpid = p->p_p->ps_pid;

		/* only af 0 (any) and the compiled-in families are valid */
		switch (rule->af) {
		case 0:
			break;
#ifdef INET
		case AF_INET:
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			break;
#endif /* INET6 */
		default:
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			goto fail;
		}
		/* number the rule after the current tail of the queue */
		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

		if (rule->src.addr.type == PF_ADDR_NONE ||
		    rule->dst.addr.type == PF_ADDR_NONE)
			error = EINVAL;

		/*
		 * Resolve dynamic address/table references for every
		 * address slot; errors accumulate and are handled once
		 * below so pf_rm_rule() can release whatever succeeded.
		 */
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		/* route-to et al. require an explicit direction */
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (rule->scrub_flags & PFSTATE_SETPRIO &&
		    (rule->set_prio[0] > IFQ_MAXPRIO ||
		    rule->set_prio[1] > IFQ_MAXPRIO))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
		    rule, entries);
		ruleset->rules.inactive.rcount++;
		break;
	}
1131 
	case DIOCGETRULES: {
		/*
		 * Return the number of active rules in an anchor plus a
		 * ticket that subsequent DIOCGETRULE calls must present.
		 */
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		/* rules are numbered consecutively, so last nr + 1 == count */
		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules.active.ticket;
		break;
	}
1151 
	case DIOCGETRULE: {
		/*
		 * Copy active rule number pr->nr out to userland, with
		 * kernel pointers scrubbed.  Requires the ticket from a
		 * prior DIOCGETRULES; PF_GET_CLR_CNTR additionally zeroes
		 * the rule's counters.
		 */
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 i;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules.active.ticket) {
			error = EBUSY;
			break;
		}
		/* linear search by rule number */
		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		/* never leak kernel pointers to userland */
		bzero(&pr->rule.entries, sizeof(pr->rule.entries));
		pr->rule.kif = NULL;
		pr->rule.nat.kif = NULL;
		pr->rule.rdr.kif = NULL;
		pr->rule.route.kif = NULL;
		pr->rule.rcv_kif = NULL;
		pr->rule.anchor = NULL;
		pr->rule.overload_tbl = NULL;
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		pf_addr_copyout(&pr->rule.rdr.addr);
		pf_addr_copyout(&pr->rule.nat.addr);
		pf_addr_copyout(&pr->rule.route.addr);
		/* translate skip-step pointers back into rule numbers */
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			rule->states_tot = 0;
		}
		break;
	}
1208 
1209 	case DIOCCHANGERULE: {
1210 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1211 		struct pf_ruleset	*ruleset;
1212 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1213 		u_int32_t		 nr = 0;
1214 
1215 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1216 		    pcr->action > PF_CHANGE_GET_TICKET) {
1217 			error = EINVAL;
1218 			break;
1219 		}
1220 		ruleset = pf_find_ruleset(pcr->anchor);
1221 		if (ruleset == NULL) {
1222 			error = EINVAL;
1223 			break;
1224 		}
1225 
1226 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1227 			pcr->ticket = ++ruleset->rules.active.ticket;
1228 			break;
1229 		} else {
1230 			if (pcr->ticket !=
1231 			    ruleset->rules.active.ticket) {
1232 				error = EINVAL;
1233 				break;
1234 			}
1235 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1236 				error = EINVAL;
1237 				break;
1238 			}
1239 		}
1240 
1241 		if (pcr->action != PF_CHANGE_REMOVE) {
1242 			newrule = pool_get(&pf_rule_pl,
1243 			    PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1244 			if (newrule == NULL) {
1245 				error = ENOMEM;
1246 				break;
1247 			}
1248 			pf_rule_copyin(&pcr->rule, newrule, ruleset);
1249 			newrule->cuid = p->p_ucred->cr_ruid;
1250 			newrule->cpid = p->p_p->ps_pid;
1251 
1252 			switch (newrule->af) {
1253 			case 0:
1254 				break;
1255 #ifdef INET
1256 			case AF_INET:
1257 				break;
1258 #endif /* INET */
1259 #ifdef INET6
1260 			case AF_INET6:
1261 				break;
1262 #endif /* INET6 */
1263 			default:
1264 				pool_put(&pf_rule_pl, newrule);
1265 				error = EAFNOSUPPORT;
1266 				goto fail;
1267 			}
1268 
1269 			if (newrule->rt && !newrule->direction)
1270 				error = EINVAL;
1271 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1272 				error = EINVAL;
1273 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1274 				error = EINVAL;
1275 			if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
1276 				error = EINVAL;
1277 			if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
1278 				error = EINVAL;
1279 			if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
1280 				error = EINVAL;
1281 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1282 				error = EINVAL;
1283 
1284 			if (error) {
1285 				pf_rm_rule(NULL, newrule);
1286 				break;
1287 			}
1288 		}
1289 
1290 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1291 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1292 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1293 			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1294 			    pf_rulequeue);
1295 		else {
1296 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1297 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1298 				oldrule = TAILQ_NEXT(oldrule, entries);
1299 			if (oldrule == NULL) {
1300 				if (newrule != NULL)
1301 					pf_rm_rule(NULL, newrule);
1302 				error = EINVAL;
1303 				break;
1304 			}
1305 		}
1306 
1307 		if (pcr->action == PF_CHANGE_REMOVE) {
1308 			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1309 			ruleset->rules.active.rcount--;
1310 		} else {
1311 			if (oldrule == NULL)
1312 				TAILQ_INSERT_TAIL(
1313 				    ruleset->rules.active.ptr,
1314 				    newrule, entries);
1315 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1316 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1317 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1318 			else
1319 				TAILQ_INSERT_AFTER(
1320 				    ruleset->rules.active.ptr,
1321 				    oldrule, newrule, entries);
1322 			ruleset->rules.active.rcount++;
1323 		}
1324 
1325 		nr = 0;
1326 		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1327 			oldrule->nr = nr++;
1328 
1329 		ruleset->rules.active.ticket++;
1330 
1331 		pf_calc_skip_steps(ruleset->rules.active.ptr);
1332 		pf_remove_if_empty_ruleset(ruleset);
1333 
1334 		break;
1335 	}
1336 
	case DIOCCLRSTATES: {
		/*
		 * Unlink all states, or only those bound to the interface
		 * named in psk_ifname.  The number killed is returned in
		 * psk_killed.
		 */
		struct pf_state		*s, *nexts;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 killed = 0;

		/* grab the successor first: pf_unlink_state removes s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name)) {
#if NPFSYNC > 0
				/* don't send out individual delete messages */
				SET(s->state_flags, PFSTATE_NOSYNC);
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
#if NPFSYNC > 0
		/* one bulk clear message instead of per-state deletes */
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}
1361 
	case DIOCKILLSTATES: {
		/*
		 * Kill states matching the filter in the pfioc_state_kill:
		 * either a single state by id/creatorid, or all states
		 * matching af/proto/rdomain/addresses/ports/label/ifname.
		 */
		struct pf_state		*s, *nexts;
		struct pf_state_key	*sk;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 killed = 0;

		/* fast path: kill exactly one state by id */
		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = pf_status.hostid;
			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
				pf_unlink_state(s);
				psk->psk_killed = 1;
			}
			break;
		}

		/* grab the successor first: pf_unlink_state removes s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			/*
			 * Pick the key/side so that srcaddr/srcport are
			 * always the connection's true source as the user
			 * sees it, regardless of state direction.
			 */
			if (s->direction == PF_OUT) {
				sk = s->key[PF_SK_STACK];
				srcaddr = &sk->addr[1];
				dstaddr = &sk->addr[0];
				srcport = sk->port[1];
				dstport = sk->port[0];
			} else {
				sk = s->key[PF_SK_WIRE];
				srcaddr = &sk->addr[0];
				dstaddr = &sk->addr[1];
				srcport = sk->port[0];
				dstport = sk->port[1];
			}
			/* unset filter fields act as wildcards */
			if ((!psk->psk_af || sk->af == psk->psk_af)
			    && (!psk->psk_proto || psk->psk_proto ==
			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    srcaddr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    dstaddr, sk->af) &&
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    srcport)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dstport)) &&
			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
			    s->kif->pfik_name))) {
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_killed = killed;
		break;
	}
1427 
#if NPFSYNC > 0
	case DIOCADDSTATE: {
		/*
		 * Insert a state supplied by userland (pfsync wire format);
		 * only compiled in when pfsync(4) is configured.
		 */
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		/* reject timeout indexes outside the PFTM_* table */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
		break;
	}
#endif
1441 
	case DIOCGETSTATE: {
		/* Look up a single state by id/creatorid and export it. */
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;
		struct pf_state_cmp	 id_key;

		bzero(&id_key, sizeof(id_key));
		id_key.id = ps->state.id;
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s);
		break;
	}
1460 
	case DIOCGETSTATES: {
		/*
		 * Export all states to the user buffer ps->ps_states.
		 * With ps_len == 0, only report the size required; on
		 * return ps_len holds the number of bytes written.
		 */
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*state;
		struct pfsync_state	*p, *pstore;
		u_int32_t		 nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		/* staging buffer: export here, then copyout one record */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = ps->ps_states;

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				/* stop when the user buffer is full */
				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
					break;
				pf_state_export(pstore, state);
				error = copyout(pstore, p, sizeof(*p));
				if (error) {
					free(pstore, M_TEMP, 0);
					goto fail;
				}
				p++;
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		ps->ps_len = sizeof(struct pfsync_state) * nr;

		free(pstore, M_TEMP, 0);
		break;
	}
1499 
	case DIOCGETSTATUS: {
		/* Snapshot global pf status plus per-interface counters. */
		struct pf_status *s = (struct pf_status *)addr;
		bcopy(&pf_status, s, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		break;
	}

	case DIOCSETSTATUSIF: {
		/*
		 * Set the status interface.  An empty name clears it
		 * immediately; a non-empty name is staged in pf_trans_set
		 * and only applied on DIOCXCOMMIT.
		 */
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		if (pi->pfiio_name[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
		pf_trans_set.mask |= PF_TSET_STATUSIF;
		break;
	}

	case DIOCCLRSTATUS: {
		/* Reset pf counters, globally or for one interface. */
		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;

		/* if ifname is specified, clear counters there only */
		if (pi->pfiio_name[0]) {
			pfi_update_status(pi->pfiio_name, NULL);
			break;
		}

		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = time_second;

		break;
	}
1535 
	case DIOCNATLOOK: {
		/*
		 * Look up the translated addresses/ports for an existing
		 * connection (e.g. for ftp-proxy): given the visible
		 * endpoints, return the endpoints on the other side of
		 * the NAT state.
		 */
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/* proto, both addresses and (for TCP/UDP) both ports
		 * must be supplied; rdomain must be in range */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)) ||
		    pnl->rdomain > RT_TABLEID_MAX)
			error = EINVAL;
		else {
			key.af = pnl->af;
			key.proto = pnl->proto;
			key.rdomain = pnl->rdomain;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* report the other side's key */
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
				pnl->rrdomain = sk->rdomain;
			} else
				error = ENOENT;
		}
		break;
	}
1581 
	case DIOCSETTIMEOUT: {
		/*
		 * Stage a new value for one PFTM_* timeout.  The value is
		 * written into pf_default_rule_new and only becomes active
		 * on DIOCXCOMMIT; the currently active value is returned.
		 */
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		/* the purge interval must never be zero */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETTIMEOUT: {
		/* Read the currently active value of one PFTM_* timeout. */
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETLIMIT: {
		/* Read the current hard limit of one PF_LIMIT_* pool. */
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}
1618 
	case DIOCSETLIMIT: {
		/*
		 * Stage a new hard limit for one PF_LIMIT_* pool; it is
		 * recorded in limit_new and applied on DIOCXCOMMIT.  The
		 * currently active limit is returned.
		 */
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
		/* can't shrink below what is currently allocated */
		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
		    pl->limit) {
			error = EBUSY;
			goto fail;
		}
		/* Fragments reference mbuf clusters. */
		if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
			error = EINVAL;
			goto fail;
		}

		pf_pool_limits[pl->index].limit_new = pl->limit;
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETDEBUG: {
		/* Stage the debug level; applied on DIOCXCOMMIT. */
		u_int32_t	*level = (u_int32_t *)addr;

		pf_trans_set.debug = *level;
		pf_trans_set.mask |= PF_TSET_DEBUG;
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		/* zero the counters of every rule in the main ruleset */
		TAILQ_FOREACH(rule,
		    ruleset->rules.active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}
1664 
	case DIOCGETRULESETS: {
		/* Count the immediate child anchors of pr->path. */
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		/*
		 * Return the name of the pr->nr'th child anchor of
		 * pr->path (index obtained via DIOCGETRULESETS).
		 */
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		/* index past the last child: nothing was copied */
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
1722 
	/*
	 * DIOCR* ioctls: thin wrappers around the pfr_* table routines.
	 * Each first validates pfrio_esize against the element type the
	 * operation transfers (0 when no element buffer is used), then
	 * passes the user's flags with PFR_FLAG_USERIOCTL added so the
	 * table code knows the request came from userland.
	 */
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
1922 
	case DIOCOSFPADD: {
		/* Add an OS fingerprint entry (pf.os). */
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		/* Fetch an OS fingerprint entry by index. */
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}
1934 
	case DIOCXBEGIN: {
		/*
		 * Begin a transaction: open an inactive ruleset or table
		 * set for each element in the user's array and hand back
		 * a ticket per element.  Also snapshots the default rule
		 * and clears the staged option set (pf_trans_set).
		 */
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		pf_default_rule_new = pf_default_rule;
		bzero(&pf_trans_set, sizeof(pf_trans_set));
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, 0);
				free(ioe, M_TEMP, 0);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					free(table, M_TEMP, 0);
					free(ioe, M_TEMP, 0);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->anchor))) {
					free(table, M_TEMP, 0);
					free(ioe, M_TEMP, 0);
					goto fail;
				}
				break;
			}
			/* hand the freshly issued ticket back to userland */
			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
				free(table, M_TEMP, 0);
				free(ioe, M_TEMP, 0);
				error = EFAULT;
				goto fail;
			}
		}
		free(table, M_TEMP, 0);
		free(ioe, M_TEMP, 0);
		break;
	}
1988 
	case DIOCXROLLBACK: {
		/*
		 * Abort a transaction: discard the inactive ruleset or
		 * table set for each element, validating each element's
		 * ticket.
		 */
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, 0);
				free(ioe, M_TEMP, 0);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					free(table, M_TEMP, 0);
					free(ioe, M_TEMP, 0);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->anchor))) {
					free(table, M_TEMP, 0);
					free(ioe, M_TEMP, 0);
					goto fail; /* really bad */
				}
				break;
			}
		}
		free(table, M_TEMP, 0);
		free(ioe, M_TEMP, 0);
		break;
	}
2034 
	case DIOCXCOMMIT: {
		/*
		 * Commit a transaction in two phases: first verify that
		 * every element's ticket is still valid and that the
		 * staged pool limits are attainable, then swap inactive
		 * for active rulesets/tables and apply staged limits,
		 * timeouts and options.  Errors in the second phase
		 * leave pf in a partially committed state ("really bad").
		 */
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		struct pf_ruleset	*rs;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, 0);
				free(ioe, M_TEMP, 0);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				     rs->tticket) {
					free(table, M_TEMP, 0);
					free(ioe, M_TEMP, 0);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules.inactive.open ||
				    rs->rules.inactive.ticket !=
				    ioe->ticket) {
					free(table, M_TEMP, 0);
					free(ioe, M_TEMP, 0);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}

		/*
		 * Checked already in DIOCSETLIMIT, but check again as the
		 * situation might have changed.
		 */
		for (i = 0; i < PF_LIMIT_MAX; i++) {
			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
			    pf_pool_limits[i].limit_new) {
				free(table, M_TEMP, 0);
				free(ioe, M_TEMP, 0);
				error = EBUSY;
				goto fail;
			}
		}
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++) {
			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
				free(table, M_TEMP, 0);
				free(ioe, M_TEMP, 0);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->type) {
			case PF_TRANS_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					free(table, M_TEMP, 0);
					free(ioe, M_TEMP, 0);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->anchor))) {
					free(table, M_TEMP, 0);
					free(ioe, M_TEMP, 0);
					goto fail; /* really bad */
				}
				break;
			}
		}
		/* apply the staged pool hard limits */
		for (i = 0; i < PF_LIMIT_MAX; i++) {
			if (pf_pool_limits[i].limit_new !=
			    pf_pool_limits[i].limit &&
			    pool_sethardlimit(pf_pool_limits[i].pp,
			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
				free(table, M_TEMP, 0);
				free(ioe, M_TEMP, 0);
				error = EBUSY;
				goto fail; /* really bad */
			}
			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
		}
		/* apply staged timeouts; a shorter purge interval needs
		 * the purge thread woken up to pick up the new period */
		for (i = 0; i < PFTM_MAX; i++) {
			int old = pf_default_rule.timeout[i];

			pf_default_rule.timeout[i] =
			    pf_default_rule_new.timeout[i];
			if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
			    pf_default_rule.timeout[i] < old)
				wakeup(pf_purge_thread);
		}
		pfi_xcommit();
		pf_trans_set_commit();
		free(table, M_TEMP, 0);
		free(ioe, M_TEMP, 0);
		break;
	}
2152 
	case DIOCGETSRCNODES: {
		/*
		 * Export all source-tracking nodes to the user buffer.
		 * With psn_len == 0, only report the required size; on
		 * return psn_len holds the number of bytes written.
		 * Times are converted from absolute uptime to relative
		 * seconds for userland.
		 */
		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node	*n, *p, *pstore;
		u_int32_t		 nr = 0;
		int			 space = psn->psn_len;

		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		/* staging buffer: sanitize here, then copyout one node */
		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

		p = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			int	secs = time_uptime, diff;

			/* stop when the user buffer is full */
			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
				break;

			bcopy(n, pstore, sizeof(*pstore));
			/* scrub kernel pointers before export */
			bzero(&pstore->entry, sizeof(pstore->entry));
			pstore->rule.ptr = NULL;
			pstore->kif = NULL;
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, p, sizeof(*p));
			if (error) {
				free(pstore, M_TEMP, 0);
				goto fail;
			}
			p++;
			nr++;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;

		free(pstore, M_TEMP, 0);
		break;
	}
2209 
2210 	case DIOCCLRSRCNODES: {
2211 		struct pf_src_node	*n;
2212 		struct pf_state		*state;
2213 
2214 		RB_FOREACH(state, pf_state_tree_id, &tree_id)
2215 			pf_src_tree_remove_state(state);
2216 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2217 			n->expire = 1;
2218 		pf_purge_expired_src_nodes(1);
2219 		break;
2220 	}
2221 
2222 	case DIOCKILLSRCNODES: {
2223 		struct pf_src_node	*sn;
2224 		struct pf_state		*s;
2225 		struct pfioc_src_node_kill *psnk =
2226 		    (struct pfioc_src_node_kill *)addr;
2227 		u_int			killed = 0;
2228 
2229 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2230 			if (PF_MATCHA(psnk->psnk_src.neg,
2231 				&psnk->psnk_src.addr.v.a.addr,
2232 				&psnk->psnk_src.addr.v.a.mask,
2233 				&sn->addr, sn->af) &&
2234 			    PF_MATCHA(psnk->psnk_dst.neg,
2235 				&psnk->psnk_dst.addr.v.a.addr,
2236 				&psnk->psnk_dst.addr.v.a.mask,
2237 				&sn->raddr, sn->af)) {
2238 				/* Handle state to src_node linkage */
2239 				if (sn->states != 0)
2240 					RB_FOREACH(s, pf_state_tree_id,
2241 					   &tree_id)
2242 						pf_state_rm_src_node(s, sn);
2243 				sn->expire = 1;
2244 				killed++;
2245 			}
2246 		}
2247 
2248 		if (killed > 0)
2249 			pf_purge_expired_src_nodes(1);
2250 
2251 		psnk->psnk_killed = killed;
2252 		break;
2253 	}
2254 
2255 	case DIOCSETHOSTID: {
2256 		u_int32_t	*hostid = (u_int32_t *)addr;
2257 
2258 		if (*hostid == 0)
2259 			pf_trans_set.hostid = arc4random();
2260 		else
2261 			pf_trans_set.hostid = *hostid;
2262 		pf_trans_set.mask |= PF_TSET_HOSTID;
2263 		break;
2264 	}
2265 
2266 	case DIOCOSFPFLUSH:
2267 		pf_osfp_flush();
2268 		break;
2269 
2270 	case DIOCIGETIFACES: {
2271 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2272 
2273 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2274 			error = ENODEV;
2275 			break;
2276 		}
2277 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2278 		    &io->pfiio_size);
2279 		break;
2280 	}
2281 
2282 	case DIOCSETIFFLAG: {
2283 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2284 
2285 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2286 		break;
2287 	}
2288 
2289 	case DIOCCLRIFFLAG: {
2290 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2291 
2292 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2293 		break;
2294 	}
2295 
2296 	case DIOCSETREASS: {
2297 		u_int32_t	*reass = (u_int32_t *)addr;
2298 
2299 		pf_trans_set.reass = *reass;
2300 		pf_trans_set.mask |= PF_TSET_REASS;
2301 		break;
2302 	}
2303 
2304 	default:
2305 		error = ENODEV;
2306 		break;
2307 	}
2308 fail:
2309 	splx(s);
2310 	if (flags & FWRITE)
2311 		rw_exit_write(&pf_consistency_lock);
2312 	else
2313 		rw_exit_read(&pf_consistency_lock);
2314 	return (error);
2315 }
2316 
2317 void
2318 pf_trans_set_commit(void)
2319 {
2320 	if (pf_trans_set.mask & PF_TSET_STATUSIF)
2321 		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2322 	if (pf_trans_set.mask & PF_TSET_DEBUG)
2323 		pf_status.debug = pf_trans_set.debug;
2324 	if (pf_trans_set.mask & PF_TSET_HOSTID)
2325 		pf_status.hostid = pf_trans_set.hostid;
2326 	if (pf_trans_set.mask & PF_TSET_REASS)
2327 		pf_status.reass = pf_trans_set.reass;
2328 }
2329 
2330 void
2331 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2332 {
2333 	bcopy(from, to, sizeof(*to));
2334 	to->kif = NULL;
2335 }
2336 
/*
 * Copy a rule image supplied by userland (from) into a kernel-side rule
 * (to), translating symbolic names into kernel references: interface
 * names to pfi_kif pointers, the overload table name to a pfr table,
 * queue names to qids and tag names to tag ids.
 *
 * Returns 0 on success, EINVAL when an interface or the overload table
 * cannot be set up, EBUSY when a referenced routing table does not
 * exist or a queue/tag name cannot be resolved.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	to->src = from->src;
	to->dst = from->dst;

	/* XXX union skip[] */

	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/*
	 * Copy the three address pools; pf_pool_copyin() clears their
	 * kif pointers, which are resolved from the ifnames below.
	 */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);
	if (to->overload_tblname[0]) {
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	/* reject rules referring to routing tables that do not exist */
	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain))
		return (EBUSY);
	if (to->onrdomain >= 0)		/* make sure it is a real rdomain */
		to->onrdomain = rtable_l2(to->onrdomain);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;

	/*
	 * Resolve queue names to ids; without an explicit priority queue
	 * name the priority qid falls back to the main qid.
	 */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	/* resolve tag names; pf_tagname2tag() returns 0 on failure */
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* a log interface is meaningless when logging is off */
	if (!to->log)
		to->logif = 0;
#endif
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert_packet.addr = from->divert_packet.addr;
	to->divert_packet.port = from->divert_packet.port;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}
2465