xref: /openbsd-src/sys/net/pf_ioctl.c (revision d13be5d47e4149db2549a9828e244d59dbc43f15)
1 /*	$OpenBSD: pf_ioctl.c,v 1.242 2011/08/30 00:40:47 mikeb Exp $ */
2 
3 /*
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  *    - Redistributions of source code must retain the above copyright
13  *      notice, this list of conditions and the following disclaimer.
14  *    - Redistributions in binary form must reproduce the above
15  *      copyright notice, this list of conditions and the following
16  *      disclaimer in the documentation and/or other materials provided
17  *      with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  *
32  * Effort sponsored in part by the Defense Advanced Research Projects
33  * Agency (DARPA) and Air Force Research Laboratory, Air Force
34  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35  *
36  */
37 
38 #include "pfsync.h"
39 #include "pflog.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/kernel.h>
49 #include <sys/time.h>
50 #include <sys/timeout.h>
51 #include <sys/pool.h>
52 #include <sys/proc.h>
53 #include <sys/malloc.h>
54 #include <sys/kthread.h>
55 #include <sys/rwlock.h>
56 #include <sys/syslog.h>
57 #include <uvm/uvm_extern.h>
58 
59 #include <net/if.h>
60 #include <net/if_types.h>
61 #include <net/route.h>
62 
63 #include <netinet/in.h>
64 #include <netinet/in_var.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/ip.h>
67 #include <netinet/ip_var.h>
68 #include <netinet/ip_icmp.h>
69 
70 #include <dev/rndvar.h>
71 #include <crypto/md5.h>
72 #include <net/pfvar.h>
73 
74 #if NPFSYNC > 0
75 #include <net/if_pfsync.h>
76 #endif /* NPFSYNC > 0 */
77 
78 #if NPFLOG > 0
79 #include <net/if_pflog.h>
80 #endif /* NPFLOG > 0 */
81 
82 #ifdef INET6
83 #include <netinet/ip6.h>
84 #include <netinet/in_pcb.h>
85 #endif /* INET6 */
86 
87 #ifdef ALTQ
88 #include <altq/altq.h>
89 #endif
90 
91 void			 pfattach(int);
92 void			 pf_thread_create(void *);
93 int			 pfopen(dev_t, int, int, struct proc *);
94 int			 pfclose(dev_t, int, int, struct proc *);
95 int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
96 #ifdef ALTQ
97 int			 pf_begin_altq(u_int32_t *);
98 int			 pf_rollback_altq(u_int32_t);
99 int			 pf_commit_altq(u_int32_t);
100 int			 pf_enable_altq(struct pf_altq *);
101 int			 pf_disable_altq(struct pf_altq *);
102 #endif /* ALTQ */
103 int			 pf_begin_rules(u_int32_t *, const char *);
104 int			 pf_rollback_rules(u_int32_t, char *);
105 int			 pf_setup_pfsync_matching(struct pf_ruleset *);
106 void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
107 void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
108 int			 pf_commit_rules(u_int32_t, char *);
109 int			 pf_addr_setup(struct pf_ruleset *,
110 			    struct pf_addr_wrap *, sa_family_t);
111 int			 pf_kif_setup(char *, struct pfi_kif **);
112 void			 pf_addr_copyout(struct pf_addr_wrap *);
113 void			 pf_trans_set_commit(void);
114 void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
115 int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
116 			    struct pf_ruleset *);
117 
118 struct pf_rule		 pf_default_rule, pf_default_rule_new;
119 struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
120 #ifdef ALTQ
121 static int		 pf_altq_running;
122 #endif
123 
124 struct {
125 	char		statusif[IFNAMSIZ];
126 	u_int32_t	debug;
127 	u_int32_t	hostid;
128 	u_int32_t	reass;
129 	u_int32_t	mask;
130 } pf_trans_set;
131 
132 #define	PF_TSET_STATUSIF	0x01
133 #define	PF_TSET_DEBUG		0x02
134 #define	PF_TSET_HOSTID		0x04
135 #define	PF_TSET_REASS		0x08
136 
137 #define	TAGID_MAX	 50000
138 TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
139 				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
140 
141 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
142 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
143 #endif
144 u_int16_t		 tagname2tag(struct pf_tags *, char *);
145 void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
146 void			 tag_unref(struct pf_tags *, u_int16_t);
147 int			 pf_rtlabel_add(struct pf_addr_wrap *);
148 void			 pf_rtlabel_remove(struct pf_addr_wrap *);
149 void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
150 
151 
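/*
 * pfattach() initializes pf at autoconf time: it sets up the memory
 * pools, the table, interface and OS fingerprint subsystems, the
 * default rule with its timeouts, and the global status, and defers
 * creation of the state purge thread until process context exists.
 */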
152 void
153 pfattach(int num)
154 {
155 	u_int32_t *timeout = pf_default_rule.timeout;
156 
157 	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
158 	    &pool_allocator_nointr);
159 	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
160 	    "pfsrctrpl", NULL);
161 	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0, 0, 0,
162 	    "pfsnitempl", NULL);
163 	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
164 	    NULL);
165 	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
166 	    "pfstatekeypl", NULL);
167 	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
168 	    "pfstateitempl", NULL);
169 	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 0, 0,
170 	    "pfruleitempl", NULL);
171 	pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
172 	    &pool_allocator_nointr);
173 	pfr_initialize();
174 	pfi_initialize();
175 	pf_osfp_initialize();
176 
177 	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
178 	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
179 
180 	if (physmem <= atop(100*1024*1024))
181 		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
182 		    PFR_KENTRY_HIWAT_SMALL;
183 
184 	RB_INIT(&tree_src_tracking);
185 	RB_INIT(&pf_anchors);
186 	pf_init_ruleset(&pf_main_ruleset);
187 	TAILQ_INIT(&pf_altqs[0]);
188 	TAILQ_INIT(&pf_altqs[1]);
189 	pf_altqs_active = &pf_altqs[0];
190 	pf_altqs_inactive = &pf_altqs[1];
191 	TAILQ_INIT(&state_list);
192 
193 	/* default rule should never be garbage collected */
194 	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
195 	pf_default_rule.action = PF_PASS;
196 	pf_default_rule.nr = -1;
197 	pf_default_rule.rtableid = -1;
198 
199 	/* initialize default timeouts */
200 	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
201 	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
202 	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
203 	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
204 	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
205 	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
206 	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
207 	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
208 	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
209 	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
210 	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
211 	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
212 	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
213 	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
214 	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
215 	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
216 	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
217 	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
218 	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
219 	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
220 
221 	pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
222 	pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
223 	pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
224 	pf_default_rule.nat.addr.type = PF_ADDR_NONE;
225 	pf_default_rule.route.addr.type = PF_ADDR_NONE;
226 
227 	pf_normalize_init();
228 	bzero(&pf_status, sizeof(pf_status));
229 	pf_status.debug = LOG_ERR;
230 	pf_status.reass = PF_REASS_ENABLED;
231 
232 	/* XXX do our best to avoid a conflict */
233 	pf_status.hostid = arc4random();
234 
235 	/* require process context to purge states, so perform in a thread */
236 	kthread_create_deferred(pf_thread_create, NULL);
237 }
238 
239 void
240 pf_thread_create(void *v)
241 {
242 	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
243 		panic("pfpurge thread");
244 }
245 
246 int
247 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
248 {
249 	if (minor(dev) >= 1)
250 		return (ENXIO);
251 	return (0);
252 }
253 
254 int
255 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
256 {
257 	if (minor(dev) >= 1)
258 		return (ENXIO);
259 	return (0);
260 }
261 
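/*
 * pf_rm_rule() unlinks a rule from the given queue (if any) and drops
 * the references it holds on tags, queues, route labels, dynamic
 * addresses, tables, interfaces and its anchor.  The rule itself is
 * only freed once it is unlinked and no states or source nodes refer
 * to it any more.
 */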
262 void
263 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
264 {
265 	if (rulequeue != NULL) {
266 		if (rule->states_cur <= 0 && rule->src_nodes <= 0) {
267 			/*
268 			 * XXX - we need to remove the table *before* detaching
269 			 * the rule to make sure the table code does not delete
270 			 * the anchor under our feet.
271 			 */
272 			pf_tbladdr_remove(&rule->src.addr);
273 			pf_tbladdr_remove(&rule->dst.addr);
274 			pf_tbladdr_remove(&rule->rdr.addr);
275 			pf_tbladdr_remove(&rule->nat.addr);
276 			pf_tbladdr_remove(&rule->route.addr);
277 			if (rule->overload_tbl)
278 				pfr_detach_table(rule->overload_tbl);
279 		}
280 		TAILQ_REMOVE(rulequeue, rule, entries);
281 		rule->entries.tqe_prev = NULL;
282 		rule->nr = -1;
283 	}
284 
285 	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
286 	    rule->entries.tqe_prev != NULL)
287 		return;
288 	pf_tag_unref(rule->tag);
289 	pf_tag_unref(rule->match_tag);
290 #ifdef ALTQ
291 	if (rule->pqid != rule->qid)
292 		pf_qid_unref(rule->pqid);
293 	pf_qid_unref(rule->qid);
294 #endif
295 	pf_rtlabel_remove(&rule->src.addr);
296 	pf_rtlabel_remove(&rule->dst.addr);
297 	pfi_dynaddr_remove(&rule->src.addr);
298 	pfi_dynaddr_remove(&rule->dst.addr);
299 	pfi_dynaddr_remove(&rule->rdr.addr);
300 	pfi_dynaddr_remove(&rule->nat.addr);
301 	pfi_dynaddr_remove(&rule->route.addr);
302 	if (rulequeue == NULL) {
303 		pf_tbladdr_remove(&rule->src.addr);
304 		pf_tbladdr_remove(&rule->dst.addr);
305 		pf_tbladdr_remove(&rule->rdr.addr);
306 		pf_tbladdr_remove(&rule->nat.addr);
307 		pf_tbladdr_remove(&rule->route.addr);
308 		if (rule->overload_tbl)
309 			pfr_detach_table(rule->overload_tbl);
310 	}
311 	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
312 	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
313 	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
314 	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
315 	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
316 	pf_anchor_remove(rule);
317 	pool_put(&pf_rule_pl, rule);
318 }
319 
320 void
321 pf_purge_rule(struct pf_ruleset *ruleset, struct pf_rule *rule)
322 {
323 	u_int32_t		 nr;
324 
325 	pf_rm_rule(ruleset->rules.active.ptr, rule);
326 	ruleset->rules.active.rcount--;
327 
328 	nr = 0;
329 	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
330 		rule->nr = nr++;
331 
332 	ruleset->rules.active.ticket++;
333 
334 	pf_calc_skip_steps(ruleset->rules.active.ptr);
335 	pf_remove_if_empty_ruleset(ruleset);
336 }
337 
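/*
 * tagname2tag() maps a tag name to a numeric id.  A known name just
 * gets its reference count bumped; otherwise the smallest unused id
 * is picked by a linear scan of the ordered list and a new entry is
 * inserted there.  Returns 0 on failure (no memory, or all ids up to
 * TAGID_MAX in use).
 */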
338 u_int16_t
339 tagname2tag(struct pf_tags *head, char *tagname)
340 {
341 	struct pf_tagname	*tag, *p = NULL;
342 	u_int16_t		 new_tagid = 1;
343 
344 	TAILQ_FOREACH(tag, head, entries)
345 		if (strcmp(tagname, tag->name) == 0) {
346 			tag->ref++;
347 			return (tag->tag);
348 		}
349 
350 	/*
351 	 * to avoid fragmentation, we do a linear search from the beginning
352 	 * and take the first free slot we find. if there is none or the list
353 	 * is empty, append a new entry at the end.
354 	 */
355 
356 	/* new entry */
357 	if (!TAILQ_EMPTY(head))
358 		for (p = TAILQ_FIRST(head); p != NULL &&
359 		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
360 			new_tagid = p->tag + 1;
361 
362 	if (new_tagid > TAGID_MAX)
363 		return (0);
364 
365 	/* allocate and fill new struct pf_tagname */
366 	tag = malloc(sizeof(*tag), M_TEMP, M_NOWAIT|M_ZERO);
367 	if (tag == NULL)
368 		return (0);
369 	strlcpy(tag->name, tagname, sizeof(tag->name));
370 	tag->tag = new_tagid;
371 	tag->ref++;
372 
373 	if (p != NULL)	/* insert new entry before p */
374 		TAILQ_INSERT_BEFORE(p, tag, entries);
375 	else	/* either list empty or no free slot in between */
376 		TAILQ_INSERT_TAIL(head, tag, entries);
377 
378 	return (tag->tag);
379 }
380 
381 void
382 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
383 {
384 	struct pf_tagname	*tag;
385 
386 	TAILQ_FOREACH(tag, head, entries)
387 		if (tag->tag == tagid) {
388 			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
389 			return;
390 		}
391 }
392 
393 void
394 tag_unref(struct pf_tags *head, u_int16_t tag)
395 {
396 	struct pf_tagname	*p, *next;
397 
398 	if (tag == 0)
399 		return;
400 
401 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
402 		next = TAILQ_NEXT(p, entries);
403 		if (tag == p->tag) {
404 			if (--p->ref == 0) {
405 				TAILQ_REMOVE(head, p, entries);
406 				free(p, M_TEMP);
407 			}
408 			break;
409 		}
410 	}
411 }
412 
413 u_int16_t
414 pf_tagname2tag(char *tagname)
415 {
416 	return (tagname2tag(&pf_tags, tagname));
417 }
418 
419 void
420 pf_tag2tagname(u_int16_t tagid, char *p)
421 {
422 	tag2tagname(&pf_tags, tagid, p);
423 }
424 
425 void
426 pf_tag_ref(u_int16_t tag)
427 {
428 	struct pf_tagname *t;
429 
430 	TAILQ_FOREACH(t, &pf_tags, entries)
431 		if (t->tag == tag)
432 			break;
433 	if (t != NULL)
434 		t->ref++;
435 }
436 
437 void
438 pf_tag_unref(u_int16_t tag)
439 {
440 	tag_unref(&pf_tags, tag);
441 }
442 
443 int
444 pf_rtlabel_add(struct pf_addr_wrap *a)
445 {
446 	if (a->type == PF_ADDR_RTLABEL &&
447 	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
448 		return (-1);
449 	return (0);
450 }
451 
452 void
453 pf_rtlabel_remove(struct pf_addr_wrap *a)
454 {
455 	if (a->type == PF_ADDR_RTLABEL)
456 		rtlabel_unref(a->v.rtlabel);
457 }
458 
459 void
460 pf_rtlabel_copyout(struct pf_addr_wrap *a)
461 {
462 	const char	*name;
463 
464 	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
465 		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
466 			strlcpy(a->v.rtlabelname, "?",
467 			    sizeof(a->v.rtlabelname));
468 		else
469 			strlcpy(a->v.rtlabelname, name,
470 			    sizeof(a->v.rtlabelname));
471 	}
472 }
473 
474 #ifdef ALTQ
475 u_int32_t
476 pf_qname2qid(char *qname)
477 {
478 	return ((u_int32_t)tagname2tag(&pf_qids, qname));
479 }
480 
481 void
482 pf_qid2qname(u_int32_t qid, char *p)
483 {
484 	tag2tagname(&pf_qids, (u_int16_t)qid, p);
485 }
486 
487 void
488 pf_qid_unref(u_int32_t qid)
489 {
490 	tag_unref(&pf_qids, (u_int16_t)qid);
491 }
492 
493 int
494 pf_begin_altq(u_int32_t *ticket)
495 {
496 	struct pf_altq	*altq;
497 	int		 error = 0;
498 
499 	/* Purge the old altq list */
500 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
501 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
502 		if (altq->qname[0] == 0) {
503 			/* detach and destroy the discipline */
504 			error = altq_remove(altq);
505 		} else
506 			pf_qid_unref(altq->qid);
507 		pool_put(&pf_altq_pl, altq);
508 	}
509 	if (error)
510 		return (error);
511 	*ticket = ++ticket_altqs_inactive;
512 	altqs_inactive_open = 1;
513 	return (0);
514 }
515 
516 int
517 pf_rollback_altq(u_int32_t ticket)
518 {
519 	struct pf_altq	*altq;
520 	int		 error = 0;
521 
522 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
523 		return (0);
524 	/* Purge the old altq list */
525 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
526 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
527 		if (altq->qname[0] == 0) {
528 			/* detach and destroy the discipline */
529 			error = altq_remove(altq);
530 		} else
531 			pf_qid_unref(altq->qid);
532 		pool_put(&pf_altq_pl, altq);
533 	}
534 	altqs_inactive_open = 0;
535 	return (error);
536 }
537 
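/*
 * pf_commit_altq() swaps the active and inactive altq lists at
 * splsoftnet, attaches (and, if altq is running, enables) the
 * disciplines on the new active list, and then detaches and destroys
 * those on the list that was just retired.
 */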
538 int
539 pf_commit_altq(u_int32_t ticket)
540 {
541 	struct pf_altqqueue	*old_altqs;
542 	struct pf_altq		*altq;
543 	int			 s, err, error = 0;
544 
545 	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
546 		return (EBUSY);
547 
548 	/* swap altqs, keep the old. */
549 	s = splsoftnet();
550 	old_altqs = pf_altqs_active;
551 	pf_altqs_active = pf_altqs_inactive;
552 	pf_altqs_inactive = old_altqs;
553 	ticket_altqs_active = ticket_altqs_inactive;
554 
555 	/* Attach new disciplines */
556 	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
557 		if (altq->qname[0] == 0) {
558 			/* attach the discipline */
559 			error = altq_pfattach(altq);
560 			if (error == 0 && pf_altq_running)
561 				error = pf_enable_altq(altq);
562 			if (error != 0) {
563 				splx(s);
564 				return (error);
565 			}
566 		}
567 	}
568 
569 	/* Purge the old altq list */
570 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
571 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
572 		if (altq->qname[0] == 0) {
573 			/* detach and destroy the discipline */
574 			if (pf_altq_running)
575 				error = pf_disable_altq(altq);
576 			err = altq_pfdetach(altq);
577 			if (err != 0 && error == 0)
578 				error = err;
579 			err = altq_remove(altq);
580 			if (err != 0 && error == 0)
581 				error = err;
582 		} else
583 			pf_qid_unref(altq->qid);
584 		pool_put(&pf_altq_pl, altq);
585 	}
586 	splx(s);
587 
588 	altqs_inactive_open = 0;
589 	return (error);
590 }
591 
592 int
593 pf_enable_altq(struct pf_altq *altq)
594 {
595 	struct ifnet		*ifp;
596 	struct tb_profile	 tb;
597 	int			 s, error = 0;
598 
599 	if ((ifp = ifunit(altq->ifname)) == NULL)
600 		return (EINVAL);
601 
602 	if (ifp->if_snd.altq_type != ALTQT_NONE)
603 		error = altq_enable(&ifp->if_snd);
604 
605 	/* set tokenbucket regulator */
606 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
607 		tb.rate = altq->ifbandwidth;
608 		tb.depth = altq->tbrsize;
609 		s = splnet();
610 		error = tbr_set(&ifp->if_snd, &tb);
611 		splx(s);
612 	}
613 
614 	return (error);
615 }
616 
617 int
618 pf_disable_altq(struct pf_altq *altq)
619 {
620 	struct ifnet		*ifp;
621 	struct tb_profile	 tb;
622 	int			 s, error;
623 
624 	if ((ifp = ifunit(altq->ifname)) == NULL)
625 		return (EINVAL);
626 
627 	/*
628 	 * if the discipline is no longer referenced by the interface, it
629 	 * has been overridden by a new one; nothing left to disable, so
629 	 * just return.
630 	 */
631 	if (altq->altq_disc != ifp->if_snd.altq_disc)
632 		return (0);
633 
634 	error = altq_disable(&ifp->if_snd);
635 
636 	if (error == 0) {
637 		/* clear tokenbucket regulator */
638 		tb.rate = 0;
639 		s = splnet();
640 		error = tbr_set(&ifp->if_snd, &tb);
641 		splx(s);
642 	}
643 
644 	return (error);
645 }
646 #endif /* ALTQ */
647 
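/*
 * Rule transactions: pf_begin_rules() flushes the inactive ruleset
 * and hands out a ticket, new rules are then staged against that
 * ticket, and pf_commit_rules() swaps the staged set with the active
 * one.  pf_rollback_rules() simply flushes the staged set again.
 */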
648 int
649 pf_begin_rules(u_int32_t *ticket, const char *anchor)
650 {
651 	struct pf_ruleset	*rs;
652 	struct pf_rule		*rule;
653 
654 	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
655 		return (EINVAL);
656 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
657 		pf_rm_rule(rs->rules.inactive.ptr, rule);
658 		rs->rules.inactive.rcount--;
659 	}
660 	*ticket = ++rs->rules.inactive.ticket;
661 	rs->rules.inactive.open = 1;
662 	return (0);
663 }
664 
665 int
666 pf_rollback_rules(u_int32_t ticket, char *anchor)
667 {
668 	struct pf_ruleset	*rs;
669 	struct pf_rule		*rule;
670 
671 	rs = pf_find_ruleset(anchor);
672 	if (rs == NULL || !rs->rules.inactive.open ||
673 	    rs->rules.inactive.ticket != ticket)
674 		return (0);
675 	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
676 		pf_rm_rule(rs->rules.inactive.ptr, rule);
677 		rs->rules.inactive.rcount--;
678 	}
679 	rs->rules.inactive.open = 0;
680 	return (0);
681 }
682 
683 #define PF_MD5_UPD(st, elm)						\
684 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
685 
686 #define PF_MD5_UPD_STR(st, elm)						\
687 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
688 
689 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
690 		(stor) = htonl((st)->elm);				\
691 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
692 } while (0)
693 
694 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
695 		(stor) = htons((st)->elm);				\
696 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
697 } while (0)
698 
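/*
 * pf_hash_rule_addr() and pf_hash_rule() feed the match-relevant
 * fields of a rule into an MD5 context; multi-byte values are
 * converted to network byte order first so the resulting ruleset
 * checksum does not depend on host byte order.
 */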
699 void
700 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
701 {
702 	PF_MD5_UPD(pfr, addr.type);
703 	switch (pfr->addr.type) {
704 		case PF_ADDR_DYNIFTL:
705 			PF_MD5_UPD(pfr, addr.v.ifname);
706 			PF_MD5_UPD(pfr, addr.iflags);
707 			break;
708 		case PF_ADDR_TABLE:
709 			PF_MD5_UPD(pfr, addr.v.tblname);
710 			break;
711 		case PF_ADDR_ADDRMASK:
712 			/* XXX ignore af? */
713 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
714 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
715 			break;
716 		case PF_ADDR_RTLABEL:
717 			PF_MD5_UPD(pfr, addr.v.rtlabelname);
718 			break;
719 	}
720 
721 	PF_MD5_UPD(pfr, port[0]);
722 	PF_MD5_UPD(pfr, port[1]);
723 	PF_MD5_UPD(pfr, neg);
724 	PF_MD5_UPD(pfr, port_op);
725 }
726 
727 void
728 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
729 {
730 	u_int16_t x;
731 	u_int32_t y;
732 
733 	pf_hash_rule_addr(ctx, &rule->src);
734 	pf_hash_rule_addr(ctx, &rule->dst);
735 	PF_MD5_UPD_STR(rule, label);
736 	PF_MD5_UPD_STR(rule, ifname);
737 	PF_MD5_UPD_STR(rule, rcv_ifname);
738 	PF_MD5_UPD_STR(rule, match_tagname);
739 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
740 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
741 	PF_MD5_UPD_HTONL(rule, prob, y);
742 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
743 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
744 	PF_MD5_UPD(rule, uid.op);
745 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
746 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
747 	PF_MD5_UPD(rule, gid.op);
748 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
749 	PF_MD5_UPD(rule, action);
750 	PF_MD5_UPD(rule, direction);
751 	PF_MD5_UPD(rule, af);
752 	PF_MD5_UPD(rule, quick);
753 	PF_MD5_UPD(rule, ifnot);
754 	PF_MD5_UPD(rule, match_tag_not);
755 	PF_MD5_UPD(rule, keep_state);
756 	PF_MD5_UPD(rule, proto);
757 	PF_MD5_UPD(rule, type);
758 	PF_MD5_UPD(rule, code);
759 	PF_MD5_UPD(rule, flags);
760 	PF_MD5_UPD(rule, flagset);
761 	PF_MD5_UPD(rule, allow_opts);
762 	PF_MD5_UPD(rule, rt);
763 	PF_MD5_UPD(rule, tos);
764 }
765 
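/*
 * pf_commit_rules() commits the staged ruleset for the given ticket:
 * it recomputes the pfsync checksum for the main ruleset, swaps the
 * active and inactive rule queues at splsoftnet, recalculates the
 * skip steps and purges the rules of the previous active set.
 */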
766 int
767 pf_commit_rules(u_int32_t ticket, char *anchor)
768 {
769 	struct pf_ruleset	*rs;
770 	struct pf_rule		*rule, **old_array;
771 	struct pf_rulequeue	*old_rules;
772 	int			 s, error;
773 	u_int32_t		 old_rcount;
774 
775 	rs = pf_find_ruleset(anchor);
776 	if (rs == NULL || !rs->rules.inactive.open ||
777 	    ticket != rs->rules.inactive.ticket)
778 		return (EBUSY);
779 
780 	/* Calculate checksum for the main ruleset */
781 	if (rs == &pf_main_ruleset) {
782 		error = pf_setup_pfsync_matching(rs);
783 		if (error != 0)
784 			return (error);
785 	}
786 
787 	/* Swap rules, keep the old. */
788 	s = splsoftnet();
789 	old_rules = rs->rules.active.ptr;
790 	old_rcount = rs->rules.active.rcount;
791 	old_array = rs->rules.active.ptr_array;
792 
793 	rs->rules.active.ptr = rs->rules.inactive.ptr;
794 	rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
795 	rs->rules.active.rcount = rs->rules.inactive.rcount;
796 	rs->rules.inactive.ptr = old_rules;
797 	rs->rules.inactive.ptr_array = old_array;
798 	rs->rules.inactive.rcount = old_rcount;
799 
800 	rs->rules.active.ticket = rs->rules.inactive.ticket;
801 	pf_calc_skip_steps(rs->rules.active.ptr);
802 
803 
804 	/* Purge the old rule list. */
805 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
806 		pf_rm_rule(old_rules, rule);
807 	if (rs->rules.inactive.ptr_array)
808 		free(rs->rules.inactive.ptr_array, M_TEMP);
809 	rs->rules.inactive.ptr_array = NULL;
810 	rs->rules.inactive.rcount = 0;
811 	rs->rules.inactive.open = 0;
812 	pf_remove_if_empty_ruleset(rs);
813 	splx(s);
814 	return (0);
815 }
816 
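/*
 * pf_setup_pfsync_matching() hashes every rule of the soon-to-be
 * active main ruleset, builds the ptr_array used to look rules up by
 * number, and stores the digest in pf_status.pf_chksum so pfsync
 * peers can tell whether they run the same ruleset.
 */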
817 int
818 pf_setup_pfsync_matching(struct pf_ruleset *rs)
819 {
820 	MD5_CTX			 ctx;
821 	struct pf_rule		*rule;
822 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
823 
824 	MD5Init(&ctx);
825 	if (rs->rules.inactive.ptr_array)
826 		free(rs->rules.inactive.ptr_array, M_TEMP);
827 	rs->rules.inactive.ptr_array = NULL;
828 
829 	if (rs->rules.inactive.rcount) {
830 		rs->rules.inactive.ptr_array = malloc(sizeof(caddr_t) *
831 	    rs->rules.inactive.rcount, M_TEMP, M_NOWAIT);
832 
833 		if (!rs->rules.inactive.ptr_array)
834 			return (ENOMEM);
835 
836 		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
837 			pf_hash_rule(&ctx, rule);
838 			(rs->rules.inactive.ptr_array)[rule->nr] = rule;
839 		}
840 	}
841 
842 	MD5Final(digest, &ctx);
843 	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
844 	return (0);
845 }
846 
847 int
848 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
849     sa_family_t af)
850 {
851 	if (pfi_dynaddr_setup(addr, af) ||
852 	    pf_tbladdr_setup(ruleset, addr) ||
853 	    pf_rtlabel_add(addr))
854 		return (EINVAL);
855 
856 	return (0);
857 }
858 
859 int
860 pf_kif_setup(char *ifname, struct pfi_kif **kif)
861 {
862 	if (ifname[0]) {
863 		*kif = pfi_kif_get(ifname);
864 		if (*kif == NULL)
865 			return (EINVAL);
866 
867 		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
868 	} else
869 		*kif = NULL;
870 
871 	return (0);
872 }
873 
874 void
875 pf_addr_copyout(struct pf_addr_wrap *addr)
876 {
877 	pfi_dynaddr_copyout(addr);
878 	pf_tbladdr_copyout(addr);
879 	pf_rtlabel_copyout(addr);
880 }
881 
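/*
 * pfioctl() is the ioctl entry point.  Requests are first filtered by
 * securelevel and by whether the device was opened for writing; then
 * the consistency lock is taken (write for modifying ioctls, read
 * otherwise) and the command itself runs at splsoftnet.
 */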
882 int
883 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
884 {
885 	int			 s;
886 	int			 error = 0;
887 
888 	/* XXX keep in sync with switch() below */
889 	if (securelevel > 1)
890 		switch (cmd) {
891 		case DIOCGETRULES:
892 		case DIOCGETRULE:
893 		case DIOCGETSTATE:
894 		case DIOCSETSTATUSIF:
895 		case DIOCGETSTATUS:
896 		case DIOCCLRSTATUS:
897 		case DIOCNATLOOK:
898 		case DIOCSETDEBUG:
899 		case DIOCGETSTATES:
900 		case DIOCGETTIMEOUT:
901 		case DIOCCLRRULECTRS:
902 		case DIOCGETLIMIT:
903 		case DIOCGETALTQS:
904 		case DIOCGETALTQ:
905 		case DIOCGETQSTATS:
906 		case DIOCGETRULESETS:
907 		case DIOCGETRULESET:
908 		case DIOCRGETTABLES:
909 		case DIOCRGETTSTATS:
910 		case DIOCRCLRTSTATS:
911 		case DIOCRCLRADDRS:
912 		case DIOCRADDADDRS:
913 		case DIOCRDELADDRS:
914 		case DIOCRSETADDRS:
915 		case DIOCRGETASTATS:
916 		case DIOCRCLRASTATS:
917 		case DIOCRTSTADDRS:
918 		case DIOCOSFPGET:
919 		case DIOCGETSRCNODES:
920 		case DIOCCLRSRCNODES:
921 		case DIOCIGETIFACES:
922 		case DIOCSETIFFLAG:
923 		case DIOCCLRIFFLAG:
924 			break;
925 		case DIOCRCLRTABLES:
926 		case DIOCRADDTABLES:
927 		case DIOCRDELTABLES:
928 		case DIOCRSETTFLAGS:
929 			if (((struct pfioc_table *)addr)->pfrio_flags &
930 			    PFR_FLAG_DUMMY)
931 				break; /* dummy operation ok */
932 			return (EPERM);
933 		default:
934 			return (EPERM);
935 		}
936 
937 	if (!(flags & FWRITE))
938 		switch (cmd) {
939 		case DIOCGETRULES:
940 		case DIOCGETSTATE:
941 		case DIOCGETSTATUS:
942 		case DIOCGETSTATES:
943 		case DIOCGETTIMEOUT:
944 		case DIOCGETLIMIT:
945 		case DIOCGETALTQS:
946 		case DIOCGETALTQ:
947 		case DIOCGETQSTATS:
948 		case DIOCGETRULESETS:
949 		case DIOCGETRULESET:
950 		case DIOCNATLOOK:
951 		case DIOCRGETTABLES:
952 		case DIOCRGETTSTATS:
953 		case DIOCRGETADDRS:
954 		case DIOCRGETASTATS:
955 		case DIOCRTSTADDRS:
956 		case DIOCOSFPGET:
957 		case DIOCGETSRCNODES:
958 		case DIOCIGETIFACES:
959 			break;
960 		case DIOCRCLRTABLES:
961 		case DIOCRADDTABLES:
962 		case DIOCRDELTABLES:
963 		case DIOCRCLRTSTATS:
964 		case DIOCRCLRADDRS:
965 		case DIOCRADDADDRS:
966 		case DIOCRDELADDRS:
967 		case DIOCRSETADDRS:
968 		case DIOCRSETTFLAGS:
969 			if (((struct pfioc_table *)addr)->pfrio_flags &
970 			    PFR_FLAG_DUMMY) {
971 				flags |= FWRITE; /* need write lock for dummy */
972 				break; /* dummy operation ok */
973 			}
974 			return (EACCES);
975 		case DIOCGETRULE:
976 			if (((struct pfioc_rule *)addr)->action ==
977 			    PF_GET_CLR_CNTR)
978 				return (EACCES);
979 			break;
980 		default:
981 			return (EACCES);
982 		}
983 
984 	if (flags & FWRITE)
985 		rw_enter_write(&pf_consistency_lock);
986 	else
987 		rw_enter_read(&pf_consistency_lock);
988 
989 	s = splsoftnet();
990 	switch (cmd) {
991 
992 	case DIOCSTART:
993 		if (pf_status.running)
994 			error = EEXIST;
995 		else {
996 			pf_status.running = 1;
997 			pf_status.since = time_second;
998 			if (pf_status.stateid == 0) {
999 				pf_status.stateid = time_second;
1000 				pf_status.stateid = pf_status.stateid << 32;
1001 			}
1002 			DPFPRINTF(LOG_NOTICE, "pf: started");
1003 		}
1004 		break;
1005 
1006 	case DIOCSTOP:
1007 		if (!pf_status.running)
1008 			error = ENOENT;
1009 		else {
1010 			pf_status.running = 0;
1011 			pf_status.since = time_second;
1012 			DPFPRINTF(LOG_NOTICE, "pf: stopped");
1013 		}
1014 		break;
1015 
1016 	case DIOCADDRULE: {
1017 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1018 		struct pf_ruleset	*ruleset;
1019 		struct pf_rule		*rule, *tail;
1020 
1021 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1022 		ruleset = pf_find_ruleset(pr->anchor);
1023 		if (ruleset == NULL) {
1024 			error = EINVAL;
1025 			break;
1026 		}
1027 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1028 			error = EINVAL;
1029 			break;
1030 		}
1031 		if (pr->ticket != ruleset->rules.inactive.ticket) {
1032 			error = EBUSY;
1033 			break;
1034 		}
1035 		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1036 		if (rule == NULL) {
1037 			error = ENOMEM;
1038 			break;
1039 		}
1040 		if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
1041 			pool_put(&pf_rule_pl, rule);
1042 			break;
1043 		}
1044 		rule->cuid = p->p_cred->p_ruid;
1045 		rule->cpid = p->p_pid;
1046 
1047 		switch (rule->af) {
1048 		case 0:
1049 			break;
1050 #ifdef INET
1051 		case AF_INET:
1052 			break;
1053 #endif /* INET */
1054 #ifdef INET6
1055 		case AF_INET6:
1056 			break;
1057 #endif /* INET6 */
1058 		default:
1059 			pool_put(&pf_rule_pl, rule);
1060 			error = EAFNOSUPPORT;
1061 			goto fail;
1062 		}
1063 		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
1064 		    pf_rulequeue);
1065 		if (tail)
1066 			rule->nr = tail->nr + 1;
1067 		else
1068 			rule->nr = 0;
1069 
1070 		if (rule->src.addr.type == PF_ADDR_NONE ||
1071 		    rule->dst.addr.type == PF_ADDR_NONE)
1072 			error = EINVAL;
1073 
1074 		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1075 			error = EINVAL;
1076 		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1077 			error = EINVAL;
1078 		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
1079 			error = EINVAL;
1080 		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
1081 			error = EINVAL;
1082 		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
1083 			error = EINVAL;
1084 		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1085 			error = EINVAL;
1086 		if (rule->rt && !rule->direction)
1087 			error = EINVAL;
1088 		if ((rule->prio[0] != PF_PRIO_NOTSET && rule->prio[0] >
1089 		    IFQ_MAXPRIO) || (rule->prio[1] != PF_PRIO_NOTSET &&
1090 		    rule->prio[1] > IFQ_MAXPRIO))
1091 			error = EINVAL;
1092 
1093 		if (error) {
1094 			pf_rm_rule(NULL, rule);
1095 			break;
1096 		}
1097 		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
1098 		    rule, entries);
1099 		ruleset->rules.inactive.rcount++;
1100 		break;
1101 	}
1102 
1103 	case DIOCGETRULES: {
1104 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1105 		struct pf_ruleset	*ruleset;
1106 		struct pf_rule		*tail;
1107 
1108 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1109 		ruleset = pf_find_ruleset(pr->anchor);
1110 		if (ruleset == NULL) {
1111 			error = EINVAL;
1112 			break;
1113 		}
1114 		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
1115 		if (tail)
1116 			pr->nr = tail->nr + 1;
1117 		else
1118 			pr->nr = 0;
1119 		pr->ticket = ruleset->rules.active.ticket;
1120 		break;
1121 	}
1122 
1123 	case DIOCGETRULE: {
1124 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1125 		struct pf_ruleset	*ruleset;
1126 		struct pf_rule		*rule;
1127 		int			 i;
1128 
1129 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
1130 		ruleset = pf_find_ruleset(pr->anchor);
1131 		if (ruleset == NULL) {
1132 			error = EINVAL;
1133 			break;
1134 		}
1135 		if (pr->ticket != ruleset->rules.active.ticket) {
1136 			error = EBUSY;
1137 			break;
1138 		}
1139 		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
1140 		while ((rule != NULL) && (rule->nr != pr->nr))
1141 			rule = TAILQ_NEXT(rule, entries);
1142 		if (rule == NULL) {
1143 			error = EBUSY;
1144 			break;
1145 		}
1146 		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1147 		if (pf_anchor_copyout(ruleset, rule, pr)) {
1148 			error = EBUSY;
1149 			break;
1150 		}
1151 		pf_addr_copyout(&pr->rule.src.addr);
1152 		pf_addr_copyout(&pr->rule.dst.addr);
1153 		pf_addr_copyout(&pr->rule.rdr.addr);
1154 		pf_addr_copyout(&pr->rule.nat.addr);
1155 		pf_addr_copyout(&pr->rule.route.addr);
1156 		for (i = 0; i < PF_SKIP_COUNT; ++i)
1157 			if (rule->skip[i].ptr == NULL)
1158 				pr->rule.skip[i].nr = -1;
1159 			else
1160 				pr->rule.skip[i].nr =
1161 				    rule->skip[i].ptr->nr;
1162 
1163 		if (pr->action == PF_GET_CLR_CNTR) {
1164 			rule->evaluations = 0;
1165 			rule->packets[0] = rule->packets[1] = 0;
1166 			rule->bytes[0] = rule->bytes[1] = 0;
1167 			rule->states_tot = 0;
1168 		}
1169 		break;
1170 	}
1171 
1172 	case DIOCCHANGERULE: {
1173 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1174 		struct pf_ruleset	*ruleset;
1175 		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1176 		u_int32_t		 nr = 0;
1177 
1178 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1179 		    pcr->action > PF_CHANGE_GET_TICKET) {
1180 			error = EINVAL;
1181 			break;
1182 		}
1183 		ruleset = pf_find_ruleset(pcr->anchor);
1184 		if (ruleset == NULL) {
1185 			error = EINVAL;
1186 			break;
1187 		}
1188 
1189 		if (pcr->action == PF_CHANGE_GET_TICKET) {
1190 			pcr->ticket = ++ruleset->rules.active.ticket;
1191 			break;
1192 		} else {
1193 			if (pcr->ticket !=
1194 			    ruleset->rules.active.ticket) {
1195 				error = EINVAL;
1196 				break;
1197 			}
1198 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1199 				error = EINVAL;
1200 				break;
1201 			}
1202 		}
1203 
1204 		if (pcr->action != PF_CHANGE_REMOVE) {
1205 			newrule = pool_get(&pf_rule_pl,
1206 			    PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1207 			if (newrule == NULL) {
1208 				error = ENOMEM;
1209 				break;
1210 			}
1211 			error = pf_rule_copyin(&pcr->rule, newrule, ruleset);
1212 			newrule->cuid = p->p_cred->p_ruid;
1213 			newrule->cpid = p->p_pid;
1214 
1215 			switch (newrule->af) {
1216 			case 0:
1217 				break;
1218 #ifdef INET
1219 			case AF_INET:
1220 				break;
1221 #endif /* INET */
1222 #ifdef INET6
1223 			case AF_INET6:
1224 				break;
1225 #endif /* INET6 */
1226 			default:
1227 				pool_put(&pf_rule_pl, newrule);
1228 				error = EAFNOSUPPORT;
1229 				goto fail;
1230 			}
1231 
1232 			if (newrule->rt && !newrule->direction)
1233 				error = EINVAL;
1234 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1235 				error = EINVAL;
1236 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1237 				error = EINVAL;
1238 			if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
1239 				error = EINVAL;
1240 			if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
1241 				error = EINVAL;
1242 			if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
1243 				error = EINVAL;
1244 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1245 				error = EINVAL;
1246 
1247 			if (error) {
1248 				pf_rm_rule(NULL, newrule);
1249 				break;
1250 			}
1251 		}
1252 
1253 		if (pcr->action == PF_CHANGE_ADD_HEAD)
1254 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1255 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1256 			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1257 			    pf_rulequeue);
1258 		else {
1259 			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1260 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1261 				oldrule = TAILQ_NEXT(oldrule, entries);
1262 			if (oldrule == NULL) {
1263 				if (newrule != NULL)
1264 					pf_rm_rule(NULL, newrule);
1265 				error = EINVAL;
1266 				break;
1267 			}
1268 		}
1269 
1270 		if (pcr->action == PF_CHANGE_REMOVE) {
1271 			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1272 			ruleset->rules.active.rcount--;
1273 		} else {
1274 			if (oldrule == NULL)
1275 				TAILQ_INSERT_TAIL(
1276 				    ruleset->rules.active.ptr,
1277 				    newrule, entries);
1278 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1279 			    pcr->action == PF_CHANGE_ADD_BEFORE)
1280 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1281 			else
1282 				TAILQ_INSERT_AFTER(
1283 				    ruleset->rules.active.ptr,
1284 				    oldrule, newrule, entries);
1285 			ruleset->rules.active.rcount++;
1286 		}
1287 
1288 		nr = 0;
1289 		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1290 			oldrule->nr = nr++;
1291 
1292 		ruleset->rules.active.ticket++;
1293 
1294 		pf_calc_skip_steps(ruleset->rules.active.ptr);
1295 		pf_remove_if_empty_ruleset(ruleset);
1296 
1297 		break;
1298 	}
1299 
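	/*
	 * DIOCCLRSTATES flushes all states, or only those bound to
	 * psk_ifname if one is given.  PFSTATE_NOSYNC suppresses the
	 * per-state pfsync delete messages; a single clear-states
	 * message is sent instead.
	 */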
1300 	case DIOCCLRSTATES: {
1301 		struct pf_state		*s, *nexts;
1302 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1303 		u_int			 killed = 0;
1304 
1305 		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
1306 			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1307 
1308 			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1309 			    s->kif->pfik_name)) {
1310 #if NPFSYNC > 0
1311 				/* don't send out individual delete messages */
1312 				SET(s->state_flags, PFSTATE_NOSYNC);
1313 #endif
1314 				pf_unlink_state(s);
1315 				killed++;
1316 			}
1317 		}
1318 		psk->psk_killed = killed;
1319 #if NPFSYNC > 0
1320 		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1321 #endif
1322 		break;
1323 	}
1324 
1325 	case DIOCKILLSTATES: {
1326 		struct pf_state		*s, *nexts;
1327 		struct pf_state_key	*sk;
1328 		struct pf_addr		*srcaddr, *dstaddr;
1329 		u_int16_t		 srcport, dstport;
1330 		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1331 		u_int			 killed = 0;
1332 
1333 		if (psk->psk_pfcmp.id) {
1334 			if (psk->psk_pfcmp.creatorid == 0)
1335 				psk->psk_pfcmp.creatorid = pf_status.hostid;
1336 			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1337 				pf_unlink_state(s);
1338 				psk->psk_killed = 1;
1339 			}
1340 			break;
1341 		}
1342 
1343 		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
1344 		    s = nexts) {
1345 			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1346 			sk = s->key[PF_SK_WIRE];
1347 
1348 			if (s->direction == PF_OUT) {
1349 				srcaddr = &sk->addr[1];
1350 				dstaddr = &sk->addr[0];
1351 				srcport = sk->port[1];
1352 				dstport = sk->port[0];
1353 			} else {
1354 				srcaddr = &sk->addr[0];
1355 				dstaddr = &sk->addr[1];
1356 				srcport = sk->port[0];
1357 				dstport = sk->port[1];
1358 			}
1359 			if ((!psk->psk_af || sk->af == psk->psk_af)
1360 			    && (!psk->psk_proto || psk->psk_proto ==
1361 			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
1362 			    PF_MATCHA(psk->psk_src.neg,
1363 			    &psk->psk_src.addr.v.a.addr,
1364 			    &psk->psk_src.addr.v.a.mask,
1365 			    srcaddr, sk->af) &&
1366 			    PF_MATCHA(psk->psk_dst.neg,
1367 			    &psk->psk_dst.addr.v.a.addr,
1368 			    &psk->psk_dst.addr.v.a.mask,
1369 			    dstaddr, sk->af) &&
1370 			    (psk->psk_src.port_op == 0 ||
1371 			    pf_match_port(psk->psk_src.port_op,
1372 			    psk->psk_src.port[0], psk->psk_src.port[1],
1373 			    srcport)) &&
1374 			    (psk->psk_dst.port_op == 0 ||
1375 			    pf_match_port(psk->psk_dst.port_op,
1376 			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1377 			    dstport)) &&
1378 			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
1379 			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1380 			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1381 			    s->kif->pfik_name))) {
1382 				pf_unlink_state(s);
1383 				killed++;
1384 			}
1385 		}
1386 		psk->psk_killed = killed;
1387 		break;
1388 	}
1389 
1390 	case DIOCADDSTATE: {
1391 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1392 		struct pfsync_state	*sp = &ps->state;
1393 
1394 		if (sp->timeout >= PFTM_MAX &&
1395 		    sp->timeout != PFTM_UNTIL_PACKET) {
1396 			error = EINVAL;
1397 			break;
1398 		}
1399 		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
1400 		break;
1401 	}
1402 
1403 	case DIOCGETSTATE: {
1404 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1405 		struct pf_state		*s;
1406 		struct pf_state_cmp	 id_key;
1407 
1408 		bzero(&id_key, sizeof(id_key));
1409 		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
1410 		id_key.creatorid = ps->state.creatorid;
1411 
1412 		s = pf_find_state_byid(&id_key);
1413 		if (s == NULL) {
1414 			error = ENOENT;
1415 			break;
1416 		}
1417 
1418 		pfsync_state_export(&ps->state, s);
1419 		break;
1420 	}
1421 
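	/*
	 * DIOCGETSTATES: with ps_len == 0 only the required buffer
	 * size is reported back; otherwise every linked state is
	 * exported into a temporary pfsync_state and copied out until
	 * the supplied buffer is full.
	 */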
1422 	case DIOCGETSTATES: {
1423 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
1424 		struct pf_state		*state;
1425 		struct pfsync_state	*p, *pstore;
1426 		u_int32_t		 nr = 0;
1427 
1428 		if (ps->ps_len == 0) {
1429 			nr = pf_status.states;
1430 			ps->ps_len = sizeof(struct pfsync_state) * nr;
1431 			break;
1432 		}
1433 
1434 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1435 
1436 		p = ps->ps_states;
1437 
1438 		state = TAILQ_FIRST(&state_list);
1439 		while (state) {
1440 			if (state->timeout != PFTM_UNLINKED) {
1441 				if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1442 					break;
1443 				pfsync_state_export(pstore, state);
1444 				error = copyout(pstore, p, sizeof(*p));
1445 				if (error) {
1446 					free(pstore, M_TEMP);
1447 					goto fail;
1448 				}
1449 				p++;
1450 				nr++;
1451 			}
1452 			state = TAILQ_NEXT(state, entry_list);
1453 		}
1454 
1455 		ps->ps_len = sizeof(struct pfsync_state) * nr;
1456 
1457 		free(pstore, M_TEMP);
1458 		break;
1459 	}
1460 
1461 	case DIOCGETSTATUS: {
1462 		struct pf_status *s = (struct pf_status *)addr;
1463 		bcopy(&pf_status, s, sizeof(struct pf_status));
1464 		pfi_update_status(s->ifname, s);
1465 		break;
1466 	}
1467 
1468 	case DIOCSETSTATUSIF: {
1469 		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;
1470 
1471 		if (pi->pfiio_name[0] == 0) {
1472 			bzero(pf_status.ifname, IFNAMSIZ);
1473 			break;
1474 		}
1475 		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
1476 		pf_trans_set.mask |= PF_TSET_STATUSIF;
1477 		break;
1478 	}
1479 
1480 	case DIOCCLRSTATUS: {
1481 		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;
1482 
1483 		/* if ifname is specified, clear counters there only */
1484 		if (pi->pfiio_name[0]) {
1485 			pfi_update_status(pi->pfiio_name, NULL);
1486 			break;
1487 		}
1488 
1489 		bzero(pf_status.counters, sizeof(pf_status.counters));
1490 		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1491 		bzero(pf_status.scounters, sizeof(pf_status.scounters));
1492 		pf_status.since = time_second;
1493 
1494 		break;
1495 	}
1496 
1497 	case DIOCNATLOOK: {
1498 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
1499 		struct pf_state_key	*sk;
1500 		struct pf_state		*state;
1501 		struct pf_state_key_cmp	 key;
1502 		int			 m = 0, direction = pnl->direction;
1503 		int			 sidx, didx;
1504 
1505 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
1506 		sidx = (direction == PF_IN) ? 1 : 0;
1507 		didx = (direction == PF_IN) ? 0 : 1;
1508 
1509 		if (!pnl->proto ||
1510 		    PF_AZERO(&pnl->saddr, pnl->af) ||
1511 		    PF_AZERO(&pnl->daddr, pnl->af) ||
1512 		    ((pnl->proto == IPPROTO_TCP ||
1513 		    pnl->proto == IPPROTO_UDP) &&
1514 		    (!pnl->dport || !pnl->sport)) ||
1515 		    pnl->rdomain > RT_TABLEID_MAX)
1516 			error = EINVAL;
1517 		else {
1518 			key.af = pnl->af;
1519 			key.proto = pnl->proto;
1520 			key.rdomain = pnl->rdomain;
1521 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1522 			key.port[sidx] = pnl->sport;
1523 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1524 			key.port[didx] = pnl->dport;
1525 
1526 			state = pf_find_state_all(&key, direction, &m);
1527 
1528 			if (m > 1)
1529 				error = E2BIG;	/* more than one state */
1530 			else if (state != NULL) {
1531 				sk = state->key[sidx];
1532 				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1533 				pnl->rsport = sk->port[sidx];
1534 				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1535 				pnl->rdport = sk->port[didx];
1536 				pnl->rrdomain = sk->rdomain;
1537 			} else
1538 				error = ENOENT;
1539 		}
1540 		break;
1541 	}
1542 
1543 	case DIOCSETTIMEOUT: {
1544 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1545 
1546 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1547 		    pt->seconds < 0) {
1548 			error = EINVAL;
1549 			goto fail;
1550 		}
1551 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1552 			pt->seconds = 1;
1553 		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
1554 		pt->seconds = pf_default_rule.timeout[pt->timeout];
1555 		break;
1556 	}
1557 
1558 	case DIOCGETTIMEOUT: {
1559 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
1560 
1561 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1562 			error = EINVAL;
1563 			goto fail;
1564 		}
1565 		pt->seconds = pf_default_rule.timeout[pt->timeout];
1566 		break;
1567 	}
1568 
1569 	case DIOCGETLIMIT: {
1570 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1571 
1572 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1573 			error = EINVAL;
1574 			goto fail;
1575 		}
1576 		pl->limit = pf_pool_limits[pl->index].limit;
1577 		break;
1578 	}
1579 
1580 	case DIOCSETLIMIT: {
1581 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
1582 
1583 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1584 		    pf_pool_limits[pl->index].pp == NULL) {
1585 			error = EINVAL;
1586 			goto fail;
1587 		}
1588 		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
1589 		    pl->limit) {
1590 			error = EBUSY;
1591 			goto fail;
1592 		}
1593 		pf_pool_limits[pl->index].limit_new = pl->limit;
1594 		pl->limit = pf_pool_limits[pl->index].limit;
1595 		break;
1596 	}
1597 
1598 	case DIOCSETDEBUG: {
1599 		u_int32_t	*level = (u_int32_t *)addr;
1600 
1601 		pf_trans_set.debug = *level;
1602 		pf_trans_set.mask |= PF_TSET_DEBUG;
1603 		break;
1604 	}
1605 
1606 	case DIOCCLRRULECTRS: {
1607 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1608 		struct pf_ruleset	*ruleset = &pf_main_ruleset;
1609 		struct pf_rule		*rule;
1610 
1611 		TAILQ_FOREACH(rule,
1612 		    ruleset->rules.active.ptr, entries) {
1613 			rule->evaluations = 0;
1614 			rule->packets[0] = rule->packets[1] = 0;
1615 			rule->bytes[0] = rule->bytes[1] = 0;
1616 		}
1617 		break;
1618 	}
1619 
1620 #ifdef ALTQ
1621 	case DIOCSTARTALTQ: {
1622 		struct pf_altq		*altq;
1623 
1624 		/* enable all altq interfaces on active list */
1625 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1626 			if (altq->qname[0] == 0) {
1627 				error = pf_enable_altq(altq);
1628 				if (error != 0)
1629 					break;
1630 			}
1631 		}
1632 		if (error == 0)
1633 			pf_altq_running = 1;
1634 		DPFPRINTF(LOG_NOTICE, "altq: started");
1635 		break;
1636 	}
1637 
1638 	case DIOCSTOPALTQ: {
1639 		struct pf_altq		*altq;
1640 
1641 		/* disable all altq interfaces on active list */
1642 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1643 			if (altq->qname[0] == 0) {
1644 				error = pf_disable_altq(altq);
1645 				if (error != 0)
1646 					break;
1647 			}
1648 		}
1649 		if (error == 0)
1650 			pf_altq_running = 0;
1651 		DPFPRINTF(LOG_NOTICE, "altq: stopped");
1652 		break;
1653 	}
1654 
1655 	case DIOCADDALTQ: {
1656 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1657 		struct pf_altq		*altq, *a;
1658 
1659 		if (pa->ticket != ticket_altqs_inactive) {
1660 			error = EBUSY;
1661 			break;
1662 		}
1663 		altq = pool_get(&pf_altq_pl, PR_WAITOK|PR_LIMITFAIL);
1664 		if (altq == NULL) {
1665 			error = ENOMEM;
1666 			break;
1667 		}
1668 		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1669 		altq->altq_disc = NULL;
1670 
1671 		/*
1672 		 * if this is for a queue, find the discipline and
1673 		 * copy the necessary fields
1674 		 */
1675 		if (altq->qname[0] != 0) {
1676 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1677 				error = EBUSY;
1678 				pool_put(&pf_altq_pl, altq);
1679 				break;
1680 			}
1681 			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1682 				if (strncmp(a->ifname, altq->ifname,
1683 				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
1684 					altq->altq_disc = a->altq_disc;
1685 					break;
1686 				}
1687 			}
1688 		}
1689 
1690 		error = altq_add(altq);
1691 		if (error) {
1692 			pool_put(&pf_altq_pl, altq);
1693 			break;
1694 		}
1695 
1696 		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
1697 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1698 		break;
1699 	}
1700 
1701 	case DIOCGETALTQS: {
1702 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1703 		struct pf_altq		*altq;
1704 
1705 		pa->nr = 0;
1706 		TAILQ_FOREACH(altq, pf_altqs_active, entries)
1707 			pa->nr++;
1708 		pa->ticket = ticket_altqs_active;
1709 		break;
1710 	}
1711 
1712 	case DIOCGETALTQ: {
1713 		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
1714 		struct pf_altq		*altq;
1715 		u_int32_t		 nr;
1716 
1717 		if (pa->ticket != ticket_altqs_active) {
1718 			error = EBUSY;
1719 			break;
1720 		}
1721 		nr = 0;
1722 		altq = TAILQ_FIRST(pf_altqs_active);
1723 		while ((altq != NULL) && (nr < pa->nr)) {
1724 			altq = TAILQ_NEXT(altq, entries);
1725 			nr++;
1726 		}
1727 		if (altq == NULL) {
1728 			error = EBUSY;
1729 			break;
1730 		}
1731 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1732 		break;
1733 	}
1734 
1735 	case DIOCCHANGEALTQ:
1736 		/* CHANGEALTQ not supported yet! */
1737 		error = ENODEV;
1738 		break;
1739 
1740 	case DIOCGETQSTATS: {
1741 		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
1742 		struct pf_altq		*altq;
1743 		u_int32_t		 nr;
1744 		int			 nbytes;
1745 
1746 		if (pq->ticket != ticket_altqs_active) {
1747 			error = EBUSY;
1748 			break;
1749 		}
1750 		nbytes = pq->nbytes;
1751 		nr = 0;
1752 		altq = TAILQ_FIRST(pf_altqs_active);
1753 		while ((altq != NULL) && (nr < pq->nr)) {
1754 			altq = TAILQ_NEXT(altq, entries);
1755 			nr++;
1756 		}
1757 		if (altq == NULL) {
1758 			error = EBUSY;
1759 			break;
1760 		}
1761 		error = altq_getqstats(altq, pq->buf, &nbytes);
1762 		if (error == 0) {
1763 			pq->scheduler = altq->scheduler;
1764 			pq->nbytes = nbytes;
1765 		}
1766 		break;
1767 	}
1768 #endif /* ALTQ */
1769 
1770 	case DIOCGETRULESETS: {
1771 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
1772 		struct pf_ruleset	*ruleset;
1773 		struct pf_anchor	*anchor;
1774 
1775 		pr->path[sizeof(pr->path) - 1] = 0;
1776 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
1777 			error = EINVAL;
1778 			break;
1779 		}
1780 		pr->nr = 0;
1781 		if (ruleset->anchor == NULL) {
1782 			/* XXX kludge for pf_main_ruleset */
1783 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
1784 				if (anchor->parent == NULL)
1785 					pr->nr++;
1786 		} else {
1787 			RB_FOREACH(anchor, pf_anchor_node,
1788 			    &ruleset->anchor->children)
1789 				pr->nr++;
1790 		}
1791 		break;
1792 	}
1793 
1794 	case DIOCGETRULESET: {
1795 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
1796 		struct pf_ruleset	*ruleset;
1797 		struct pf_anchor	*anchor;
1798 		u_int32_t		 nr = 0;
1799 
1800 		pr->path[sizeof(pr->path) - 1] = 0;
1801 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
1802 			error = EINVAL;
1803 			break;
1804 		}
1805 		pr->name[0] = 0;
1806 		if (ruleset->anchor == NULL) {
1807 			/* XXX kludge for pf_main_ruleset */
1808 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
1809 				if (anchor->parent == NULL && nr++ == pr->nr) {
1810 					strlcpy(pr->name, anchor->name,
1811 					    sizeof(pr->name));
1812 					break;
1813 				}
1814 		} else {
1815 			RB_FOREACH(anchor, pf_anchor_node,
1816 			    &ruleset->anchor->children)
1817 				if (nr++ == pr->nr) {
1818 					strlcpy(pr->name, anchor->name,
1819 					    sizeof(pr->name));
1820 					break;
1821 				}
1822 		}
1823 		if (!pr->name[0])
1824 			error = EBUSY;
1825 		break;
1826 	}
1827 
1828 	case DIOCRCLRTABLES: {
1829 		struct pfioc_table *io = (struct pfioc_table *)addr;
1830 
1831 		if (io->pfrio_esize != 0) {
1832 			error = ENODEV;
1833 			break;
1834 		}
1835 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
1836 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
1837 		break;
1838 	}
1839 
1840 	case DIOCRADDTABLES: {
1841 		struct pfioc_table *io = (struct pfioc_table *)addr;
1842 
1843 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1844 			error = ENODEV;
1845 			break;
1846 		}
1847 		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
1848 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1849 		break;
1850 	}
1851 
1852 	case DIOCRDELTABLES: {
1853 		struct pfioc_table *io = (struct pfioc_table *)addr;
1854 
1855 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1856 			error = ENODEV;
1857 			break;
1858 		}
1859 		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
1860 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1861 		break;
1862 	}
1863 
1864 	case DIOCRGETTABLES: {
1865 		struct pfioc_table *io = (struct pfioc_table *)addr;
1866 
1867 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1868 			error = ENODEV;
1869 			break;
1870 		}
1871 		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
1872 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1873 		break;
1874 	}
1875 
1876 	case DIOCRGETTSTATS: {
1877 		struct pfioc_table *io = (struct pfioc_table *)addr;
1878 
1879 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
1880 			error = ENODEV;
1881 			break;
1882 		}
1883 		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
1884 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1885 		break;
1886 	}
1887 
1888 	case DIOCRCLRTSTATS: {
1889 		struct pfioc_table *io = (struct pfioc_table *)addr;
1890 
1891 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1892 			error = ENODEV;
1893 			break;
1894 		}
1895 		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
1896 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1897 		break;
1898 	}
1899 
1900 	case DIOCRSETTFLAGS: {
1901 		struct pfioc_table *io = (struct pfioc_table *)addr;
1902 
1903 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
1904 			error = ENODEV;
1905 			break;
1906 		}
1907 		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
1908 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
1909 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1910 		break;
1911 	}
1912 
1913 	case DIOCRCLRADDRS: {
1914 		struct pfioc_table *io = (struct pfioc_table *)addr;
1915 
1916 		if (io->pfrio_esize != 0) {
1917 			error = ENODEV;
1918 			break;
1919 		}
1920 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
1921 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
1922 		break;
1923 	}
1924 
1925 	case DIOCRADDADDRS: {
1926 		struct pfioc_table *io = (struct pfioc_table *)addr;
1927 
1928 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1929 			error = ENODEV;
1930 			break;
1931 		}
1932 		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
1933 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
1934 		    PFR_FLAG_USERIOCTL);
1935 		break;
1936 	}
1937 
1938 	case DIOCRDELADDRS: {
1939 		struct pfioc_table *io = (struct pfioc_table *)addr;
1940 
1941 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1942 			error = ENODEV;
1943 			break;
1944 		}
1945 		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
1946 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
1947 		    PFR_FLAG_USERIOCTL);
1948 		break;
1949 	}
1950 
1951 	case DIOCRSETADDRS: {
1952 		struct pfioc_table *io = (struct pfioc_table *)addr;
1953 
1954 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1955 			error = ENODEV;
1956 			break;
1957 		}
1958 		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
1959 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
1960 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
1961 		    PFR_FLAG_USERIOCTL, 0);
1962 		break;
1963 	}
1964 
1965 	case DIOCRGETADDRS: {
1966 		struct pfioc_table *io = (struct pfioc_table *)addr;
1967 
1968 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1969 			error = ENODEV;
1970 			break;
1971 		}
1972 		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
1973 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1974 		break;
1975 	}
1976 
1977 	case DIOCRGETASTATS: {
1978 		struct pfioc_table *io = (struct pfioc_table *)addr;
1979 
1980 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
1981 			error = ENODEV;
1982 			break;
1983 		}
1984 		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
1985 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
1986 		break;
1987 	}
1988 
1989 	case DIOCRCLRASTATS: {
1990 		struct pfioc_table *io = (struct pfioc_table *)addr;
1991 
1992 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
1993 			error = ENODEV;
1994 			break;
1995 		}
1996 		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
1997 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
1998 		    PFR_FLAG_USERIOCTL);
1999 		break;
2000 	}
2001 
2002 	case DIOCRTSTADDRS: {
2003 		struct pfioc_table *io = (struct pfioc_table *)addr;
2004 
2005 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2006 			error = ENODEV;
2007 			break;
2008 		}
2009 		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2010 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2011 		    PFR_FLAG_USERIOCTL);
2012 		break;
2013 	}
2014 
2015 	case DIOCRINADEFINE: {
2016 		struct pfioc_table *io = (struct pfioc_table *)addr;
2017 
2018 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2019 			error = ENODEV;
2020 			break;
2021 		}
2022 		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2023 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2024 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2025 		break;
2026 	}
2027 
2028 	case DIOCOSFPADD: {
2029 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2030 		error = pf_osfp_add(io);
2031 		break;
2032 	}
2033 
2034 	case DIOCOSFPGET: {
2035 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2036 		error = pf_osfp_get(io);
2037 		break;
2038 	}
2039 
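	/*
	 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement transactional
	 * ruleset loads: each transaction element (rules, tables, ALTQ)
	 * receives a ticket on begin, and the whole set is later either
	 * rolled back or committed as one unit using those tickets.
	 */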
2040 	case DIOCXBEGIN: {
2041 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2042 		struct pfioc_trans_e	*ioe;
2043 		struct pfr_table	*table;
2044 		int			 i;
2045 
2046 		if (io->esize != sizeof(*ioe)) {
2047 			error = ENODEV;
2048 			goto fail;
2049 		}
2050 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2051 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2052 		pf_default_rule_new = pf_default_rule;
2053 		bzero(&pf_trans_set, sizeof(pf_trans_set));
2054 		for (i = 0; i < io->size; i++) {
2055 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2056 				free(table, M_TEMP);
2057 				free(ioe, M_TEMP);
2058 				error = EFAULT;
2059 				goto fail;
2060 			}
2061 			switch (ioe->type) {
2062 #ifdef ALTQ
2063 			case PF_TRANS_ALTQ:
2064 				if (ioe->anchor[0]) {
2065 					free(table, M_TEMP);
2066 					free(ioe, M_TEMP);
2067 					error = EINVAL;
2068 					goto fail;
2069 				}
2070 				if ((error = pf_begin_altq(&ioe->ticket))) {
2071 					free(table, M_TEMP);
2072 					free(ioe, M_TEMP);
2073 					goto fail;
2074 				}
2075 				break;
2076 #endif /* ALTQ */
2077 			case PF_TRANS_TABLE:
2078 				bzero(table, sizeof(*table));
2079 				strlcpy(table->pfrt_anchor, ioe->anchor,
2080 				    sizeof(table->pfrt_anchor));
2081 				if ((error = pfr_ina_begin(table,
2082 				    &ioe->ticket, NULL, 0))) {
2083 					free(table, M_TEMP);
2084 					free(ioe, M_TEMP);
2085 					goto fail;
2086 				}
2087 				break;
2088 			default:
2089 				if ((error = pf_begin_rules(&ioe->ticket,
2090 				    ioe->anchor))) {
2091 					free(table, M_TEMP);
2092 					free(ioe, M_TEMP);
2093 					goto fail;
2094 				}
2095 				break;
2096 			}
2097 			if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2098 				free(table, M_TEMP);
2099 				free(ioe, M_TEMP);
2100 				error = EFAULT;
2101 				goto fail;
2102 			}
2103 		}
2104 		free(table, M_TEMP);
2105 		free(ioe, M_TEMP);
2106 		break;
2107 	}
2108 
2109 	case DIOCXROLLBACK: {
2110 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2111 		struct pfioc_trans_e	*ioe;
2112 		struct pfr_table	*table;
2113 		int			 i;
2114 
2115 		if (io->esize != sizeof(*ioe)) {
2116 			error = ENODEV;
2117 			goto fail;
2118 		}
2119 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2120 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2121 		for (i = 0; i < io->size; i++) {
2122 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2123 				free(table, M_TEMP);
2124 				free(ioe, M_TEMP);
2125 				error = EFAULT;
2126 				goto fail;
2127 			}
2128 			switch (ioe->type) {
2129 #ifdef ALTQ
2130 			case PF_TRANS_ALTQ:
2131 				if (ioe->anchor[0]) {
2132 					free(table, M_TEMP);
2133 					free(ioe, M_TEMP);
2134 					error = EINVAL;
2135 					goto fail;
2136 				}
2137 				if ((error = pf_rollback_altq(ioe->ticket))) {
2138 					free(table, M_TEMP);
2139 					free(ioe, M_TEMP);
2140 					goto fail; /* really bad */
2141 				}
2142 				break;
2143 #endif /* ALTQ */
2144 			case PF_TRANS_TABLE:
2145 				bzero(table, sizeof(*table));
2146 				strlcpy(table->pfrt_anchor, ioe->anchor,
2147 				    sizeof(table->pfrt_anchor));
2148 				if ((error = pfr_ina_rollback(table,
2149 				    ioe->ticket, NULL, 0))) {
2150 					free(table, M_TEMP);
2151 					free(ioe, M_TEMP);
2152 					goto fail; /* really bad */
2153 				}
2154 				break;
2155 			default:
2156 				if ((error = pf_rollback_rules(ioe->ticket,
2157 				    ioe->anchor))) {
2158 					free(table, M_TEMP);
2159 					free(ioe, M_TEMP);
2160 					goto fail; /* really bad */
2161 				}
2162 				break;
2163 			}
2164 		}
2165 		free(table, M_TEMP);
2166 		free(ioe, M_TEMP);
2167 		break;
2168 	}
2169 
2170 	case DIOCXCOMMIT: {
2171 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
2172 		struct pfioc_trans_e	*ioe;
2173 		struct pfr_table	*table;
2174 		struct pf_ruleset	*rs;
2175 		int			 i;
2176 
2177 		if (io->esize != sizeof(*ioe)) {
2178 			error = ENODEV;
2179 			goto fail;
2180 		}
2181 		ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2182 		table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2183 		/* first pass: make sure everything will succeed */
2184 		for (i = 0; i < io->size; i++) {
2185 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2186 				free(table, M_TEMP);
2187 				free(ioe, M_TEMP);
2188 				error = EFAULT;
2189 				goto fail;
2190 			}
2191 			switch (ioe->type) {
2192 #ifdef ALTQ
2193 			case PF_TRANS_ALTQ:
2194 				if (ioe->anchor[0]) {
2195 					free(table, M_TEMP);
2196 					free(ioe, M_TEMP);
2197 					error = EINVAL;
2198 					goto fail;
2199 				}
2200 				if (!altqs_inactive_open || ioe->ticket !=
2201 				    ticket_altqs_inactive) {
2202 					free(table, M_TEMP);
2203 					free(ioe, M_TEMP);
2204 					error = EBUSY;
2205 					goto fail;
2206 				}
2207 				break;
2208 #endif /* ALTQ */
2209 			case PF_TRANS_TABLE:
2210 				rs = pf_find_ruleset(ioe->anchor);
2211 				if (rs == NULL || !rs->topen || ioe->ticket !=
2212 				     rs->tticket) {
2213 					free(table, M_TEMP);
2214 					free(ioe, M_TEMP);
2215 					error = EBUSY;
2216 					goto fail;
2217 				}
2218 				break;
2219 			default:
2220 				rs = pf_find_ruleset(ioe->anchor);
2221 				if (rs == NULL ||
2222 				    !rs->rules.inactive.open ||
2223 				    rs->rules.inactive.ticket !=
2224 				    ioe->ticket) {
2225 					free(table, M_TEMP);
2226 					free(ioe, M_TEMP);
2227 					error = EBUSY;
2228 					goto fail;
2229 				}
2230 				break;
2231 			}
2232 		}
2233 		/*
2234 		 * Checked already in DIOCSETLIMIT, but check again as the
2235 		 * situation might have changed.
2236 		 */
2237 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2238 			if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2239 			    pf_pool_limits[i].limit_new) {
2240 				free(table, M_TEMP);
2241 				free(ioe, M_TEMP);
2242 				error = EBUSY;
2243 				goto fail;
2244 			}
2245 		}
2246 		/* now do the commit - no errors should happen here */
2247 		for (i = 0; i < io->size; i++) {
2248 			if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2249 				free(table, M_TEMP);
2250 				free(ioe, M_TEMP);
2251 				error = EFAULT;
2252 				goto fail;
2253 			}
2254 			switch (ioe->type) {
2255 #ifdef ALTQ
2256 			case PF_TRANS_ALTQ:
2257 				if ((error = pf_commit_altq(ioe->ticket))) {
2258 					free(table, M_TEMP);
2259 					free(ioe, M_TEMP);
2260 					goto fail; /* really bad */
2261 				}
2262 				break;
2263 #endif /* ALTQ */
2264 			case PF_TRANS_TABLE:
2265 				bzero(table, sizeof(*table));
2266 				strlcpy(table->pfrt_anchor, ioe->anchor,
2267 				    sizeof(table->pfrt_anchor));
2268 				if ((error = pfr_ina_commit(table, ioe->ticket,
2269 				    NULL, NULL, 0))) {
2270 					free(table, M_TEMP);
2271 					free(ioe, M_TEMP);
2272 					goto fail; /* really bad */
2273 				}
2274 				break;
2275 			default:
2276 				if ((error = pf_commit_rules(ioe->ticket,
2277 				    ioe->anchor))) {
2278 					free(table, M_TEMP);
2279 					free(ioe, M_TEMP);
2280 					goto fail; /* really bad */
2281 				}
2282 				break;
2283 			}
2284 		}
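		/* apply the pool hard limits staged by DIOCSETLIMIT */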
2285 		for (i = 0; i < PF_LIMIT_MAX; i++) {
2286 			if (pf_pool_limits[i].limit_new !=
2287 			    pf_pool_limits[i].limit &&
2288 			    pool_sethardlimit(pf_pool_limits[i].pp,
2289 			    pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2290 				free(table, M_TEMP);
2291 				free(ioe, M_TEMP);
2292 				error = EBUSY;
2293 				goto fail; /* really bad */
2294 			}
2295 			pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2296 		}
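		/*
		 * Install the new default-rule timeouts; wake the purge
		 * thread if the purge interval was shortened so it picks
		 * up the new value right away.
		 */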
2297 		for (i = 0; i < PFTM_MAX; i++) {
2298 			int old = pf_default_rule.timeout[i];
2299 
2300 			pf_default_rule.timeout[i] =
2301 			    pf_default_rule_new.timeout[i];
2302 			if (i == PFTM_INTERVAL &&
2303 			    pf_default_rule.timeout[i] < old)
2304 				wakeup(pf_purge_thread);
2305 		}
2306 		pfi_xcommit();
2307 		pf_trans_set_commit();
2308 		free(table, M_TEMP);
2309 		free(ioe, M_TEMP);
2310 		break;
2311 	}
2312 
2313 	case DIOCGETSRCNODES: {
2314 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
2315 		struct pf_src_node	*n, *p, *pstore;
2316 		u_int32_t		 nr = 0;
2317 		int			 space = psn->psn_len;
2318 
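		/* psn_len == 0 is a size probe: report the space needed */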
2319 		if (space == 0) {
2320 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2321 				nr++;
2322 			psn->psn_len = sizeof(struct pf_src_node) * nr;
2323 			break;
2324 		}
2325 
2326 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2327 
2328 		p = psn->psn_src_nodes;
2329 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2330 			int	secs = time_second, diff;
2331 
2332 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2333 				break;
2334 
2335 			bcopy(n, pstore, sizeof(*pstore));
2336 			if (n->rule.ptr != NULL)
2337 				pstore->rule.nr = n->rule.ptr->nr;
2338 			pstore->creation = secs - pstore->creation;
2339 			if (pstore->expire > secs)
2340 				pstore->expire -= secs;
2341 			else
2342 				pstore->expire = 0;
2343 
2344 			/* adjust the connection rate estimate */
2345 			diff = secs - n->conn_rate.last;
2346 			if (diff >= n->conn_rate.seconds)
2347 				pstore->conn_rate.count = 0;
2348 			else
2349 				pstore->conn_rate.count -=
2350 				    n->conn_rate.count * diff /
2351 				    n->conn_rate.seconds;
2352 
2353 			error = copyout(pstore, p, sizeof(*p));
2354 			if (error) {
2355 				free(pstore, M_TEMP);
2356 				goto fail;
2357 			}
2358 			p++;
2359 			nr++;
2360 		}
2361 		psn->psn_len = sizeof(struct pf_src_node) * nr;
2362 
2363 		free(pstore, M_TEMP);
2364 		break;
2365 	}
2366 
2367 	case DIOCCLRSRCNODES: {
2368 		struct pf_src_node	*n;
2369 		struct pf_state		*state;
2370 
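		/*
		 * Detach every state from its source node, mark all source
		 * nodes expired and purge them immediately.
		 */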
2371 		RB_FOREACH(state, pf_state_tree_id, &tree_id)
2372 			pf_src_tree_remove_state(state);
2373 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2374 			n->expire = 1;
2375 		pf_purge_expired_src_nodes(1);
2376 		break;
2377 	}
2378 
2379 	case DIOCKILLSRCNODES: {
2380 		struct pf_src_node	*sn;
2381 		struct pf_state		*s;
2382 		struct pfioc_src_node_kill *psnk =
2383 		    (struct pfioc_src_node_kill *)addr;
2384 		u_int			killed = 0;
2385 
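		/*
		 * Expire every source node whose source and redirect
		 * addresses match the given masks, detaching its states
		 * first.
		 */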
2386 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2387 			if (PF_MATCHA(psnk->psnk_src.neg,
2388 				&psnk->psnk_src.addr.v.a.addr,
2389 				&psnk->psnk_src.addr.v.a.mask,
2390 				&sn->addr, sn->af) &&
2391 			    PF_MATCHA(psnk->psnk_dst.neg,
2392 				&psnk->psnk_dst.addr.v.a.addr,
2393 				&psnk->psnk_dst.addr.v.a.mask,
2394 				&sn->raddr, sn->af)) {
2395 				/* Handle state to src_node linkage */
2396 				if (sn->states != 0)
2397 					RB_FOREACH(s, pf_state_tree_id,
2398 					   &tree_id)
2399 						pf_state_rm_src_node(s, sn);
2400 				sn->expire = 1;
2401 				killed++;
2402 			}
2403 		}
2404 
2405 		if (killed > 0)
2406 			pf_purge_expired_src_nodes(1);
2407 
2408 		psnk->psnk_killed = killed;
2409 		break;
2410 	}
2411 
2412 	case DIOCSETHOSTID: {
2413 		u_int32_t	*hostid = (u_int32_t *)addr;
2414 
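		/* a hostid of 0 requests a randomly generated one */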
2415 		if (*hostid == 0)
2416 			pf_trans_set.hostid = arc4random();
2417 		else
2418 			pf_trans_set.hostid = *hostid;
2419 		pf_trans_set.mask |= PF_TSET_HOSTID;
2420 		break;
2421 	}
2422 
2423 	case DIOCOSFPFLUSH:
2424 		pf_osfp_flush();
2425 		break;
2426 
2427 	case DIOCIGETIFACES: {
2428 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2429 
2430 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2431 			error = ENODEV;
2432 			break;
2433 		}
2434 		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2435 		    &io->pfiio_size);
2436 		break;
2437 	}
2438 
2439 	case DIOCSETIFFLAG: {
2440 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2441 
2442 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2443 		break;
2444 	}
2445 
2446 	case DIOCCLRIFFLAG: {
2447 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
2448 
2449 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2450 		break;
2451 	}
2452 
2453 	case DIOCSETREASS: {
2454 		u_int32_t	*reass = (u_int32_t *)addr;
2455 
2456 		pf_trans_set.reass = *reass;
2457 		pf_trans_set.mask |= PF_TSET_REASS;
2458 		break;
2459 	}
2460 
2461 	default:
2462 		error = ENODEV;
2463 		break;
2464 	}
2465 fail:
2466 	splx(s);
2467 	if (flags & FWRITE)
2468 		rw_exit_write(&pf_consistency_lock);
2469 	else
2470 		rw_exit_read(&pf_consistency_lock);
2471 	return (error);
2472 }
2473 
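/*
 * Apply the option changes staged in pf_trans_set (status interface,
 * debug level, hostid, reassembly) to the live pf_status.
 */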
2474 void
2475 pf_trans_set_commit(void)
2476 {
2477 	if (pf_trans_set.mask & PF_TSET_STATUSIF)
2478 		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2479 	if (pf_trans_set.mask & PF_TSET_DEBUG)
2480 		pf_status.debug = pf_trans_set.debug;
2481 	if (pf_trans_set.mask & PF_TSET_HOSTID)
2482 		pf_status.hostid = pf_trans_set.hostid;
2483 	if (pf_trans_set.mask & PF_TSET_REASS)
2484 		pf_status.reass = pf_trans_set.reass;
2485 }
2486 
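/* Copy a pool from userland; the kernel-only kif pointer is cleared. */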
2487 void
2488 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2489 {
2490 	bcopy(from, to, sizeof(*to));
2491 	to->kif = NULL;
2492 }
2493 
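/*
 * Copy a rule provided by userland into its kernel representation and
 * resolve interface, table, queue and tag names into kernel references.
 * Returns EINVAL or EBUSY if any lookup fails.
 */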
2494 int
2495 pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
2496     struct pf_ruleset *ruleset)
2497 {
2498 	int i;
2499 
2500 	to->src = from->src;
2501 	to->dst = from->dst;
2502 
2503 	/* XXX union skip[] */
2504 
2505 	strlcpy(to->label, from->label, sizeof(to->label));
2506 	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
2507 	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
2508 	strlcpy(to->qname, from->qname, sizeof(to->qname));
2509 	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
2510 	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
2511 	strlcpy(to->match_tagname, from->match_tagname,
2512 	    sizeof(to->match_tagname));
2513 	strlcpy(to->overload_tblname, from->overload_tblname,
2514 	    sizeof(to->overload_tblname));
2515 
2516 	pf_pool_copyin(&from->nat, &to->nat);
2517 	pf_pool_copyin(&from->rdr, &to->rdr);
2518 	pf_pool_copyin(&from->route, &to->route);
2519 
2520 	if (pf_kif_setup(to->ifname, &to->kif))
2521 		return (EINVAL);
2522 	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
2523 		return (EINVAL);
2524 	if (to->overload_tblname[0]) {
2525 		if ((to->overload_tbl = pfr_attach_table(ruleset,
2526 		    to->overload_tblname, 0)) == NULL)
2527 			return (EINVAL);
2528 		else
2529 			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
2530 	}
2531 
2532 	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
2533 		return (EINVAL);
2534 	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
2535 		return (EINVAL);
2536 	if (pf_kif_setup(to->route.ifname, &to->route.kif))
2537 		return (EINVAL);
2538 
2539 	to->os_fingerprint = from->os_fingerprint;
2540 
2541 	to->rtableid = from->rtableid;
2542 	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
2543 		return (EBUSY);
2544 	to->onrdomain = from->onrdomain;
2545 	if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain))
2546 		return (EBUSY);
2547 	if (to->onrdomain >= 0)		/* make sure it is a real rdomain */
2548 		to->onrdomain = rtable_l2(to->onrdomain);
2549 
2550 	for (i = 0; i < PFTM_MAX; i++)
2551 		to->timeout[i] = from->timeout[i];
2552 	to->states_tot = from->states_tot;
2553 	to->max_states = from->max_states;
2554 	to->max_src_nodes = from->max_src_nodes;
2555 	to->max_src_states = from->max_src_states;
2556 	to->max_src_conn = from->max_src_conn;
2557 	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
2558 	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
2559 
2560 #ifdef ALTQ
2561 	/* set queue IDs */
2562 	if (to->qname[0] != 0) {
2563 		if ((to->qid = pf_qname2qid(to->qname)) == 0)
2564 			return (EBUSY);
2565 		else if (to->pqname[0] != 0) {
2566 			if ((to->pqid = pf_qname2qid(to->pqname)) == 0)
2567 				return (EBUSY);
2568 		} else
2569 			to->pqid = to->qid;
2570 	}
2571 #endif
2572 	to->rt_listid = from->rt_listid;
2573 	to->prob = from->prob;
2574 	to->return_icmp = from->return_icmp;
2575 	to->return_icmp6 = from->return_icmp6;
2576 	to->max_mss = from->max_mss;
2577 	if (to->tagname[0])
2578 		if ((to->tag = pf_tagname2tag(to->tagname)) == 0)
2579 			return (EBUSY);
2580 	if (to->match_tagname[0])
2581 		if ((to->match_tag = pf_tagname2tag(to->match_tagname)) == 0)
2582 			return (EBUSY);
2583 	to->scrub_flags = from->scrub_flags;
2584 	to->uid = from->uid;
2585 	to->gid = from->gid;
2586 	to->rule_flag = from->rule_flag;
2587 	to->action = from->action;
2588 	to->direction = from->direction;
2589 	to->log = from->log;
2590 	to->logif = from->logif;
2591 #if NPFLOG > 0
2592 	if (!to->log)
2593 		to->logif = 0;
2594 	if (to->logif >= PFLOGIFS_MAX)
2595 		return (EINVAL);
2596 #endif
2597 	to->quick = from->quick;
2598 	to->ifnot = from->ifnot;
2599 	to->match_tag_not = from->match_tag_not;
2600 	to->keep_state = from->keep_state;
2601 	to->af = from->af;
2602 	to->proto = from->proto;
2603 	to->type = from->type;
2604 	to->code = from->code;
2605 	to->flags = from->flags;
2606 	to->flagset = from->flagset;
2607 	to->min_ttl = from->min_ttl;
2608 	to->allow_opts = from->allow_opts;
2609 	to->rt = from->rt;
2610 	to->return_ttl = from->return_ttl;
2611 	to->tos = from->tos;
2612 	to->set_tos = from->set_tos;
2613 	to->anchor_relative = from->anchor_relative; /* XXX */
2614 	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
2615 	to->flush = from->flush;
2616 	to->divert.addr = from->divert.addr;
2617 	to->divert.port = from->divert.port;
2618 	to->divert_packet.addr = from->divert_packet.addr;
2619 	to->divert_packet.port = from->divert_packet.port;
2620 	to->prio[0] = from->prio[0];
2621 	to->prio[1] = from->prio[1];
2622 
2623 	return (0);
2624 }
2625