/* $NetBSD: pf_ioctl.c,v 1.58 2022/03/28 12:33:21 riastradh Exp $ */
/* $OpenBSD: pf_ioctl.c,v 1.182 2007/06/24 11:17:13 mcbride Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pf_ioctl.c,v 1.58 2022/03/28 12:33:21 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include "pfsync.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <uvm/uvm_extern.h>
#ifdef __NetBSD__
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/kauth.h>
#include <sys/module.h>
#include <sys/cprng.h>
#include <sys/device.h>
#endif /* __NetBSD__ */

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifndef __NetBSD__
#include <dev/rndvar.h>
#include <crypto/md5.h>
#else
#include <netinet/in_offload.h>
#include <sys/md5.h>
#endif /* __NetBSD__ */
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif /* NPFLOG > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_offload.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

#include "ioconf.h"

#ifdef _MODULE
void pfdetach(void);
#endif /* _MODULE */
#ifndef __NetBSD__
void pf_thread_create(void *);
#endif /* !__NetBSD__ */
int pfopen(dev_t, int, int, struct lwp *);
int pfclose(dev_t, int, int, struct lwp *);
struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
    u_int8_t, u_int8_t, u_int8_t);

void pf_mv_pool(struct pf_palist *, struct pf_palist *);
void pf_empty_pool(struct pf_palist *);
int pfioctl(dev_t, u_long, void *, int, struct lwp *);
#ifdef ALTQ
int pf_begin_altq(u_int32_t *);
int pf_rollback_altq(u_int32_t);
int pf_commit_altq(u_int32_t);
int pf_enable_altq(struct pf_altq *);
int pf_disable_altq(struct pf_altq *);
#endif /* ALTQ */
int pf_begin_rules(u_int32_t *, int, const char *);
int pf_rollback_rules(u_int32_t, int, char *);
int pf_setup_pfsync_matching(struct pf_ruleset *);
void pf_hash_rule(MD5_CTX *, struct pf_rule *);
void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int pf_commit_rules(u_int32_t, int, char *);
void pf_state_export(struct pfsync_state *,
    struct pf_state_key *, struct pf_state *);
void pf_state_import(struct pfsync_state *,
    struct pf_state_key *, struct pf_state *);

static int pf_state_add(struct pfsync_state *);

struct pf_rule pf_default_rule;
#ifdef __NetBSD__
krwlock_t pf_consistency_lock;
#else
struct rwlock pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
#endif /* __NetBSD__ */
#ifdef ALTQ
static int pf_altq_running;
#endif

int pf_state_lock = 0;

#define TAGID_MAX 50000
TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
    pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t tagname2tag(struct pf_tags *, char *);
void tag2tagname(struct pf_tags *, u_int16_t, char *);
void tag_unref(struct pf_tags *, u_int16_t);
int pf_rtlabel_add(struct pf_addr_wrap *);
void pf_rtlabel_remove(struct pf_addr_wrap *);
void pf_rtlabel_copyout(struct pf_addr_wrap *);

#ifdef __NetBSD__
void pf_deferred_init(device_t);
#endif

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#ifdef __NetBSD__
const struct cdevsw pf_cdevsw = {
    .d_open = pfopen,
    .d_close = pfclose,
    .d_read = noread,
    .d_write = nowrite,
    .d_ioctl = pfioctl,
    .d_stop = nostop,
    .d_tty = notty,
    .d_poll = nopoll,
    .d_mmap = nommap,
    .d_kqfilter = nokqfilter,
    .d_discard = nodiscard,
    .d_flag = D_OTHER
};

static int pfil4_wrapper(void *, struct mbuf **, struct ifnet *, int);
#ifdef INET6
static int pfil6_wrapper(void *, struct mbuf **, struct ifnet *, int);
#endif /* INET6 */

static int pf_pfil_attach(void);
static int pf_pfil_detach(void);

static int pf_pfil_attached;

static kauth_listener_t pf_listener;
#endif /* __NetBSD__ */

#ifdef __NetBSD__
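/*
 * kauth(9) listener on the network scope: allow firewall requests
 * already classified as KAUTH_REQ_NETWORK_FIREWALL_FW or _NAT, and
 * defer everything else to other listeners.
 */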
static int
pf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
    int result;
    enum kauth_network_req req;

    result = KAUTH_RESULT_DEFER;
    req = (enum kauth_network_req)(uintptr_t)arg0;

    if (action != KAUTH_NETWORK_FIREWALL)
        return result;

    /* These must have come from device context. */
    if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
        (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
        result = KAUTH_RESULT_ALLOW;

    return result;
}
#endif /* __NetBSD__ */

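/*
 * pfattach: create the memory pools, initialize the table, interface
 * and OS-fingerprint subsystems, and set up the default rule, its
 * timeouts and the global status block.
 */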
void
pfattach(int num)
{
    u_int32_t *timeout = pf_default_rule.timeout;

#ifdef __NetBSD__
    pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
        &pool_allocator_nointr, IPL_NONE);
    pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
        "pfsrctrpl", NULL, IPL_SOFTNET);
    pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
        NULL, IPL_SOFTNET);
    pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
        "pfstatekeypl", NULL, IPL_SOFTNET);
    pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
        &pool_allocator_nointr, IPL_NONE);
    pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
        "pfpooladdrpl", &pool_allocator_nointr, IPL_NONE);
#else
    pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
        &pool_allocator_nointr);
    pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
        "pfsrctrpl", NULL);
    pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
        NULL);
    pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
        "pfstatekeypl", NULL);
    pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
        &pool_allocator_nointr);
    pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
        "pfpooladdrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */

    pfr_initialize();
    pfi_initialize();
    pf_osfp_initialize();

    pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
        pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

    if (ctob(physmem) <= 100*1024*1024)
        pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
            PFR_KENTRY_HIWAT_SMALL;

    RB_INIT(&tree_src_tracking);
    RB_INIT(&pf_anchors);
    pf_init_ruleset(&pf_main_ruleset);
    TAILQ_INIT(&pf_altqs[0]);
    TAILQ_INIT(&pf_altqs[1]);
    TAILQ_INIT(&pf_pabuf);
    pf_altqs_active = &pf_altqs[0];
    pf_altqs_inactive = &pf_altqs[1];
    TAILQ_INIT(&state_list);

#ifdef __NetBSD__
    rw_init(&pf_consistency_lock);
#endif /* __NetBSD__ */

    /* default rule should never be garbage collected */
    pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
    pf_default_rule.action = PF_PASS;
    pf_default_rule.nr = -1;
    pf_default_rule.rtableid = -1;

    /* initialize default timeouts */
    timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
    timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
    timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
    timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
    timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
    timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
    timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
    timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
    timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
    timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
    timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
    timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
    timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
    timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
    timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
    timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
    timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
    timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
    timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
    timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

    pf_normalize_init();
    bzero(&pf_status, sizeof(pf_status));
    pf_status.debug = PF_DEBUG_URGENT;

#ifdef __NetBSD__
    /*
     * Defer rest of initialization until we can use cprng_fast32()
     * which requires per-CPU data to have been initialized which
     * in turn requires that all CPUs have been discovered and
     * attached!
     */
    config_interrupts(NULL, pf_deferred_init);
#else
    /* XXX do our best to avoid a conflict */
    pf_status.hostid = cprng_fast32();

    /* require process context to purge states, so perform in a thread */
    kthread_create_deferred(pf_thread_create, NULL);
#endif /* !__NetBSD__ */

#ifdef __NetBSD__
    pf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
        pf_listener_cb, NULL);
#endif /* __NetBSD__ */
}

#ifdef __NetBSD__
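/*
 * Finish the initialization that needs a working cprng(9) and process
 * context: pick a host id and start the state purge thread.
 */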
/* ARGSUSED */
void
pf_deferred_init(device_t dev)
{

    /* XXX do our best to avoid a conflict */
    pf_status.hostid = cprng_fast32();

    /* require process context to purge states, so perform in a thread */
    if (kthread_create(PRI_NONE, 0, NULL, pf_purge_thread, NULL, NULL,
        "pfpurge"))
        panic("pfpurge thread");
}
#endif /* __NetBSD__ */

#ifdef _MODULE
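/*
 * pfdetach: tear down in roughly the reverse order of pfattach.  Stop
 * the purge thread, unhook pfil, flush rules, states, source nodes,
 * tables and anchors, then destroy the pools and subsystems.
 */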
void
pfdetach(void)
{
    extern int pf_purge_thread_running;
    extern int pf_purge_thread_stop;
    struct pf_anchor *anchor;
    struct pf_state *state;
    struct pf_src_node *node;
    struct pfioc_table pt;
    u_int32_t ticket;
    int i;
    char r = '\0';

    pf_purge_thread_stop = 1;
    wakeup(pf_purge_thread);

    /* wait until the kthread exits */
    while (pf_purge_thread_running)
        tsleep(&pf_purge_thread_running, PWAIT, "pfdown", 0);

    (void)pf_pfil_detach();

    pf_status.running = 0;

    /* clear the rulesets */
    for (i = 0; i < PF_RULESET_MAX; i++)
        if (pf_begin_rules(&ticket, i, &r) == 0)
            pf_commit_rules(ticket, i, &r);
#ifdef ALTQ
    if (pf_begin_altq(&ticket) == 0)
        pf_commit_altq(ticket);
#endif /* ALTQ */

    /* clear states */
    RB_FOREACH(state, pf_state_tree_id, &tree_id) {
        state->timeout = PFTM_PURGE;
#if NPFSYNC > 0
        state->sync_flags = PFSTATE_NOSYNC;
#endif /* NPFSYNC > 0 */
    }
    pf_purge_expired_states(pf_status.states);
#if NPFSYNC > 0
    pfsync_clear_states(pf_status.hostid, NULL);
#endif /* NPFSYNC > 0 */

    /* clear source nodes */
    RB_FOREACH(state, pf_state_tree_id, &tree_id) {
        state->src_node = NULL;
        state->nat_src_node = NULL;
    }
    RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
        node->expire = 1;
        node->states = 0;
    }
    pf_purge_expired_src_nodes(0);

    /* clear tables */
    memset(&pt, '\0', sizeof(pt));
    pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

    /* destroy anchors */
    while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
        for (i = 0; i < PF_RULESET_MAX; i++)
            if (pf_begin_rules(&ticket, i, anchor->name) == 0)
                pf_commit_rules(ticket, i, anchor->name);
    }

    /* destroy main ruleset */
    pf_remove_if_empty_ruleset(&pf_main_ruleset);

    /* destroy the pools */
    pool_destroy(&pf_pooladdr_pl);
    pool_destroy(&pf_altq_pl);
    pool_destroy(&pf_state_key_pl);
    pool_destroy(&pf_state_pl);
    pool_destroy(&pf_rule_pl);
    pool_destroy(&pf_src_tree_pl);

    rw_destroy(&pf_consistency_lock);

    /* destroy subsystems */
    pf_normalize_destroy();
    pf_osfp_destroy();
    pfr_destroy();
    pfi_destroy();

    /* cleanup kauth listener */
    kauth_unlisten_scope(pf_listener);
}
#endif /* _MODULE */

#ifndef __NetBSD__
void
pf_thread_create(void *v)
{
    if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
        panic("pfpurge thread");
}
#endif /* !__NetBSD__ */

int
pfopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
    if (minor(dev) >= 1)
        return (ENXIO);
    return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
    if (minor(dev) >= 1)
        return (ENXIO);
    return (0);
}

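/*
 * Look up the address pool of a rule, identified by anchor path, the
 * ruleset derived from rule_action, and the rule number; r_last selects
 * the last rule in the queue instead, and check_ticket optionally
 * validates the caller's ticket against the active or inactive list.
 */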
struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
    struct pf_ruleset *ruleset;
    struct pf_rule *rule;
    int rs_num;

    ruleset = pf_find_ruleset(anchor);
    if (ruleset == NULL)
        return (NULL);
    rs_num = pf_get_ruleset_number(rule_action);
    if (rs_num >= PF_RULESET_MAX)
        return (NULL);
    if (active) {
        if (check_ticket && ticket !=
            ruleset->rules[rs_num].active.ticket)
            return (NULL);
        if (r_last)
            rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
                pf_rulequeue);
        else
            rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
    } else {
        if (check_ticket && ticket !=
            ruleset->rules[rs_num].inactive.ticket)
            return (NULL);
        if (r_last)
            rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
                pf_rulequeue);
        else
            rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
    }
    if (!r_last) {
        while ((rule != NULL) && (rule->nr != rule_number))
            rule = TAILQ_NEXT(rule, entries);
    }
    if (rule == NULL)
        return (NULL);

    return (&rule->rpool);
}

void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
    struct pf_pooladdr *mv_pool_pa;

    while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
        TAILQ_REMOVE(poola, mv_pool_pa, entries);
        TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
    }
}

void
pf_empty_pool(struct pf_palist *poola)
{
    struct pf_pooladdr *empty_pool_pa;

    while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
        pfi_dynaddr_remove(&empty_pool_pa->addr);
        pf_tbladdr_remove(&empty_pool_pa->addr);
        pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
        TAILQ_REMOVE(poola, empty_pool_pa, entries);
        pool_put(&pf_pooladdr_pl, empty_pool_pa);
    }
}

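/*
 * Unlink a rule from its queue and drop everything it references
 * (tags, queues, tables, interfaces, anchors, pool addresses).  The
 * rule itself is freed only once no states or source nodes use it.
 */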
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
    if (rulequeue != NULL) {
        if (rule->states <= 0) {
            /*
             * XXX - we need to remove the table *before* detaching
             * the rule to make sure the table code does not delete
             * the anchor under our feet.
             */
            pf_tbladdr_remove(&rule->src.addr);
            pf_tbladdr_remove(&rule->dst.addr);
            if (rule->overload_tbl)
                pfr_detach_table(rule->overload_tbl);
        }
        TAILQ_REMOVE(rulequeue, rule, entries);
        rule->entries.tqe_prev = NULL;
        rule->nr = -1;
    }

    if (rule->states > 0 || rule->src_nodes > 0 ||
        rule->entries.tqe_prev != NULL)
        return;
    pf_tag_unref(rule->tag);
    pf_tag_unref(rule->match_tag);
#ifdef ALTQ
    if (rule->pqid != rule->qid)
        pf_qid_unref(rule->pqid);
    pf_qid_unref(rule->qid);
#endif
    pf_rtlabel_remove(&rule->src.addr);
    pf_rtlabel_remove(&rule->dst.addr);
    pfi_dynaddr_remove(&rule->src.addr);
    pfi_dynaddr_remove(&rule->dst.addr);
    if (rulequeue == NULL) {
        pf_tbladdr_remove(&rule->src.addr);
        pf_tbladdr_remove(&rule->dst.addr);
        if (rule->overload_tbl)
            pfr_detach_table(rule->overload_tbl);
    }
    pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
    pf_anchor_remove(rule);
    pf_empty_pool(&rule->rpool.list);
    pool_put(&pf_rule_pl, rule);
}

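/*
 * Map a tag name to a numeric id, allocating the lowest free id on
 * first use; ids are reference counted and reclaimed in tag_unref().
 * Returns 0 on failure (table full or out of memory).
 */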
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
    struct pf_tagname *tag, *p = NULL;
    u_int16_t new_tagid = 1;

    TAILQ_FOREACH(tag, head, entries)
        if (strcmp(tagname, tag->name) == 0) {
            tag->ref++;
            return (tag->tag);
        }

    /*
     * to avoid fragmentation, we do a linear search from the beginning
     * and take the first free slot we find. if there is none or the list
     * is empty, append a new entry at the end.
     */

    /* new entry */
    if (!TAILQ_EMPTY(head))
        for (p = TAILQ_FIRST(head); p != NULL &&
            p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
            new_tagid = p->tag + 1;

    if (new_tagid > TAGID_MAX)
        return (0);

    /* allocate and fill new struct pf_tagname */
    tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
        M_TEMP, M_NOWAIT);
    if (tag == NULL)
        return (0);
    bzero(tag, sizeof(struct pf_tagname));
    strlcpy(tag->name, tagname, sizeof(tag->name));
    tag->tag = new_tagid;
    tag->ref++;

    if (p != NULL)	/* insert new entry before p */
        TAILQ_INSERT_BEFORE(p, tag, entries);
    else	/* either list empty or no free slot in between */
        TAILQ_INSERT_TAIL(head, tag, entries);

    return (tag->tag);
}

void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
    struct pf_tagname *tag;

    TAILQ_FOREACH(tag, head, entries)
        if (tag->tag == tagid) {
            strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
            return;
        }
}

void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
    struct pf_tagname *p, *next;

    if (tag == 0)
        return;

    for (p = TAILQ_FIRST(head); p != NULL; p = next) {
        next = TAILQ_NEXT(p, entries);
        if (tag == p->tag) {
            if (--p->ref == 0) {
                TAILQ_REMOVE(head, p, entries);
                free(p, M_TEMP);
            }
            break;
        }
    }
}

u_int16_t
pf_tagname2tag(char *tagname)
{
    return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
    tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
    struct pf_tagname *t;

    TAILQ_FOREACH(t, &pf_tags, entries)
        if (t->tag == tag)
            break;
    if (t != NULL)
        t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
    tag_unref(&pf_tags, tag);
}

int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
    if (a->type == PF_ADDR_RTLABEL &&
        (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
        return (-1);
#endif /* !__NetBSD__ */
    return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
    if (a->type == PF_ADDR_RTLABEL)
        rtlabel_unref(a->v.rtlabel);
#endif /* !__NetBSD__ */
}

void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#ifndef __NetBSD__
    const char *name;

    if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
        if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
            strlcpy(a->v.rtlabelname, "?",
                sizeof(a->v.rtlabelname));
        else
            strlcpy(a->v.rtlabelname, name,
                sizeof(a->v.rtlabelname));
    }
#endif /* !__NetBSD__ */
}

#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
    return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
    tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
    tag_unref(&pf_qids, (u_int16_t)qid);
}

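/*
 * The altq and rule transactions below share one begin/rollback/commit
 * pattern: begin flushes the inactive list and hands out a ticket,
 * commit swaps the inactive and active lists under splsoftnet() and
 * purges the old one, and rollback simply discards the inactive list.
 */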
int
pf_begin_altq(u_int32_t *ticket)
{
    struct pf_altq *altq;
    int error = 0;

    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0) {
            /* detach and destroy the discipline */
            error = altq_remove(altq);
        } else
            pf_qid_unref(altq->qid);
        pool_put(&pf_altq_pl, altq);
    }
    if (error)
        return (error);
    *ticket = ++ticket_altqs_inactive;
    altqs_inactive_open = 1;
    return (0);
}

int
pf_rollback_altq(u_int32_t ticket)
{
    struct pf_altq *altq;
    int error = 0;

    if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
        return (0);
    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0) {
            /* detach and destroy the discipline */
            error = altq_remove(altq);
        } else
            pf_qid_unref(altq->qid);
        pool_put(&pf_altq_pl, altq);
    }
    altqs_inactive_open = 0;
    return (error);
}

int
pf_commit_altq(u_int32_t ticket)
{
    struct pf_altqqueue *old_altqs;
    struct pf_altq *altq;
    int s, err, error = 0;

    if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
        return (EBUSY);

    /* swap altqs, keep the old. */
    s = splsoftnet();
    old_altqs = pf_altqs_active;
    pf_altqs_active = pf_altqs_inactive;
    pf_altqs_inactive = old_altqs;
    ticket_altqs_active = ticket_altqs_inactive;

    /* Attach new disciplines */
    TAILQ_FOREACH(altq, pf_altqs_active, entries) {
        if (altq->qname[0] == 0) {
            /* attach the discipline */
            error = altq_pfattach(altq);
            if (error == 0 && pf_altq_running)
                error = pf_enable_altq(altq);
            if (error != 0) {
                splx(s);
                return (error);
            }
        }
    }

    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0) {
            /* detach and destroy the discipline */
            if (pf_altq_running)
                error = pf_disable_altq(altq);
            err = altq_pfdetach(altq);
            if (err != 0 && error == 0)
                error = err;
            err = altq_remove(altq);
            if (err != 0 && error == 0)
                error = err;
        } else
            pf_qid_unref(altq->qid);
        pool_put(&pf_altq_pl, altq);
    }
    splx(s);

    altqs_inactive_open = 0;
    return (error);
}

int
pf_enable_altq(struct pf_altq *altq)
{
    struct ifnet *ifp;
    struct tb_profile tb;
    int s, error = 0;

    if ((ifp = ifunit(altq->ifname)) == NULL)
        return (EINVAL);

    if (ifp->if_snd.altq_type != ALTQT_NONE)
        error = altq_enable(&ifp->if_snd);

    /* set tokenbucket regulator */
    if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
        tb.rate = altq->ifbandwidth;
        tb.depth = altq->tbrsize;
        s = splnet();
        error = tbr_set(&ifp->if_snd, &tb);
        splx(s);
    }

    return (error);
}

int
pf_disable_altq(struct pf_altq *altq)
{
    struct ifnet *ifp;
    struct tb_profile tb;
    int s, error;

    if ((ifp = ifunit(altq->ifname)) == NULL)
        return (EINVAL);

    /*
     * If the discipline is no longer referenced, it has been
     * overridden by a new one.  If so, just return.
     */
    if (altq->altq_disc != ifp->if_snd.altq_disc)
        return (0);

    error = altq_disable(&ifp->if_snd);

    if (error == 0) {
        /* clear tokenbucket regulator */
        tb.rate = 0;
        s = splnet();
        error = tbr_set(&ifp->if_snd, &tb);
        splx(s);
    }

    return (error);
}
#endif /* ALTQ */

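/*
 * Start (or abandon) a ruleset transaction: flush the inactive rule
 * queue and, for begin, issue a new ticket and mark the queue open.
 */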
int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
    struct pf_ruleset *rs;
    struct pf_rule *rule;

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
        return (EINVAL);
    rs = pf_find_or_create_ruleset(anchor);
    if (rs == NULL)
        return (EINVAL);
    while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
        pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
        rs->rules[rs_num].inactive.rcount--;
    }
    *ticket = ++rs->rules[rs_num].inactive.ticket;
    rs->rules[rs_num].inactive.open = 1;
    return (0);
}

int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
    struct pf_ruleset *rs;
    struct pf_rule *rule;

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
        return (EINVAL);
    rs = pf_find_ruleset(anchor);
    if (rs == NULL || !rs->rules[rs_num].inactive.open ||
        rs->rules[rs_num].inactive.ticket != ticket)
        return (0);
    while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
        pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
        rs->rules[rs_num].inactive.rcount--;
    }
    rs->rules[rs_num].inactive.open = 0;
    return (0);
}

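/*
 * Helpers to feed rule fields into an MD5 context; multi-byte fields
 * are hashed in network byte order so the ruleset checksum compares
 * equal between pfsync peers regardless of endianness.
 */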
#define PF_MD5_UPD(st, elm) \
    MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
    MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
    (stor) = htonl((st)->elm); \
    MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
    (stor) = htons((st)->elm); \
    MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t)); \
} while (0)

void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
    PF_MD5_UPD(pfr, addr.type);
    switch (pfr->addr.type) {
    case PF_ADDR_DYNIFTL:
        PF_MD5_UPD(pfr, addr.v.ifname);
        PF_MD5_UPD(pfr, addr.iflags);
        break;
    case PF_ADDR_TABLE:
        PF_MD5_UPD(pfr, addr.v.tblname);
        break;
    case PF_ADDR_ADDRMASK:
        /* XXX ignore af? */
        PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
        PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
        break;
    case PF_ADDR_RTLABEL:
        PF_MD5_UPD(pfr, addr.v.rtlabelname);
        break;
    }

    PF_MD5_UPD(pfr, port[0]);
    PF_MD5_UPD(pfr, port[1]);
    PF_MD5_UPD(pfr, neg);
    PF_MD5_UPD(pfr, port_op);
}

void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
    u_int16_t x;
    u_int32_t y;

    pf_hash_rule_addr(ctx, &rule->src);
    pf_hash_rule_addr(ctx, &rule->dst);
    PF_MD5_UPD_STR(rule, label);
    PF_MD5_UPD_STR(rule, ifname);
    PF_MD5_UPD_STR(rule, match_tagname);
    PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
    PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
    PF_MD5_UPD_HTONL(rule, prob, y);
    PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
    PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
    PF_MD5_UPD(rule, uid.op);
    PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
    PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
    PF_MD5_UPD(rule, gid.op);
    PF_MD5_UPD_HTONL(rule, rule_flag, y);
    PF_MD5_UPD(rule, action);
    PF_MD5_UPD(rule, direction);
    PF_MD5_UPD(rule, af);
    PF_MD5_UPD(rule, quick);
    PF_MD5_UPD(rule, ifnot);
    PF_MD5_UPD(rule, match_tag_not);
    PF_MD5_UPD(rule, natpass);
    PF_MD5_UPD(rule, keep_state);
    PF_MD5_UPD(rule, proto);
    PF_MD5_UPD(rule, type);
    PF_MD5_UPD(rule, code);
    PF_MD5_UPD(rule, flags);
    PF_MD5_UPD(rule, flagset);
    PF_MD5_UPD(rule, allow_opts);
    PF_MD5_UPD(rule, rt);
    PF_MD5_UPD(rule, tos);
}

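/*
 * Atomically swap the inactive ruleset in for the active one (at
 * splsoftnet()), recompute the skip steps, and purge the old rules.
 */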
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
    struct pf_ruleset *rs;
    struct pf_rule *rule, **old_array;
    struct pf_rulequeue *old_rules;
    int s, error;
    u_int32_t old_rcount;

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
        return (EINVAL);
    rs = pf_find_ruleset(anchor);
    if (rs == NULL || !rs->rules[rs_num].inactive.open ||
        ticket != rs->rules[rs_num].inactive.ticket)
        return (EBUSY);

    /* Calculate checksum for the main ruleset */
    if (rs == &pf_main_ruleset) {
        error = pf_setup_pfsync_matching(rs);
        if (error != 0)
            return (error);
    }

    /* Swap rules, keep the old. */
    s = splsoftnet();
    old_rules = rs->rules[rs_num].active.ptr;
    old_rcount = rs->rules[rs_num].active.rcount;
    old_array = rs->rules[rs_num].active.ptr_array;

    rs->rules[rs_num].active.ptr =
        rs->rules[rs_num].inactive.ptr;
    rs->rules[rs_num].active.ptr_array =
        rs->rules[rs_num].inactive.ptr_array;
    rs->rules[rs_num].active.rcount =
        rs->rules[rs_num].inactive.rcount;
    rs->rules[rs_num].inactive.ptr = old_rules;
    rs->rules[rs_num].inactive.ptr_array = old_array;
    rs->rules[rs_num].inactive.rcount = old_rcount;

    rs->rules[rs_num].active.ticket =
        rs->rules[rs_num].inactive.ticket;
    pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

    /* Purge the old rule list. */
    while ((rule = TAILQ_FIRST(old_rules)) != NULL)
        pf_rm_rule(old_rules, rule);
    if (rs->rules[rs_num].inactive.ptr_array)
        free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
    rs->rules[rs_num].inactive.ptr_array = NULL;
    rs->rules[rs_num].inactive.rcount = 0;
    rs->rules[rs_num].inactive.open = 0;
    pf_remove_if_empty_ruleset(rs);
    splx(s);
    return (0);
}

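/*
 * Flatten a state and its key into the wire format used by pfsync and
 * the DIOCGETSTATE(S) ioctls; timestamps become relative seconds.
 */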
void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
    int secs = time_second;

    bzero(sp, sizeof(struct pfsync_state));

    /* copy from state key */
    sp->lan.addr = sk->lan.addr;
    sp->lan.port = sk->lan.port;
    sp->gwy.addr = sk->gwy.addr;
    sp->gwy.port = sk->gwy.port;
    sp->ext.addr = sk->ext.addr;
    sp->ext.port = sk->ext.port;
    sp->proto = sk->proto;
    sp->af = sk->af;
    sp->direction = sk->direction;

    /* copy from state */
    memcpy(&sp->id, &s->id, sizeof(sp->id));
    sp->creatorid = s->creatorid;
    strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
    pf_state_peer_to_pfsync(&s->src, &sp->src);
    pf_state_peer_to_pfsync(&s->dst, &sp->dst);

    sp->rule = s->rule.ptr->nr;
    sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
    sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;

    pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
    pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
    pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
    pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
    sp->creation = secs - s->creation;
    sp->expire = pf_state_expires(s);
    sp->log = s->log;
    sp->allow_opts = s->allow_opts;
    sp->timeout = s->timeout;

    if (s->src_node)
        sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
    if (s->nat_src_node)
        sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

    if (sp->expire > secs)
        sp->expire -= secs;
    else
        sp->expire = 0;
}

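/*
 * Inverse of pf_state_export: rebuild a state key and state from the
 * wire format; the state is attached to the default rule.
 */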
void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
    /* copy to state key */
    sk->lan.addr = sp->lan.addr;
    sk->lan.port = sp->lan.port;
    sk->gwy.addr = sp->gwy.addr;
    sk->gwy.port = sp->gwy.port;
    sk->ext.addr = sp->ext.addr;
    sk->ext.port = sp->ext.port;
    sk->proto = sp->proto;
    sk->af = sp->af;
    sk->direction = sp->direction;

    /* copy to state */
    memcpy(&s->id, &sp->id, sizeof(sp->id));
    s->creatorid = sp->creatorid;
    pf_state_peer_from_pfsync(&sp->src, &s->src);
    pf_state_peer_from_pfsync(&sp->dst, &s->dst);

    s->rule.ptr = &pf_default_rule;
    s->rule.ptr->states++;
    s->nat_rule.ptr = NULL;
    s->anchor.ptr = NULL;
    s->rt_kif = NULL;
    s->creation = time_second;
    s->expire = time_second;
    s->timeout = sp->timeout;
    if (sp->expire > 0)
        s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
    s->pfsync_time = 0;
    s->packets[0] = s->packets[1] = 0;
    s->bytes[0] = s->bytes[1] = 0;
}

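/*
 * Allocate and insert one state from a pfsync_state record, as used
 * by the DIOCADDSTATE(S) ioctls; the state is bound to the interface
 * named in the record.
 */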
int
pf_state_add(struct pfsync_state *sp)
{
    struct pf_state *s;
    struct pf_state_key *sk;
    struct pfi_kif *kif;

    if (sp->timeout >= PFTM_MAX &&
        sp->timeout != PFTM_UNTIL_PACKET) {
        return EINVAL;
    }
    s = pool_get(&pf_state_pl, PR_NOWAIT);
    if (s == NULL) {
        return ENOMEM;
    }
    bzero(s, sizeof(struct pf_state));
    if ((sk = pf_alloc_state_key(s)) == NULL) {
        pool_put(&pf_state_pl, s);
        return ENOMEM;
    }
    pf_state_import(sp, sk, s);
    kif = pfi_kif_get(sp->ifname);
    if (kif == NULL) {
        pool_put(&pf_state_pl, s);
        pool_put(&pf_state_key_pl, sk);
        return ENOENT;
    }
    if (pf_insert_state(kif, s)) {
        pfi_kif_unref(kif, PFI_KIF_REF_NONE);
        pool_put(&pf_state_pl, s);
        return ENOMEM;
    }

    return 0;
}

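/*
 * Compute the MD5 checksum of the main ruleset (and rebuild the
 * per-ruleset rule pointer arrays) so that pfsync peers can verify
 * they are running identical rulesets.
 */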
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
    MD5_CTX ctx;
    struct pf_rule *rule;
    int rs_cnt;
    u_int8_t digest[PF_MD5_DIGEST_LENGTH];

    MD5Init(&ctx);
    for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
        /* XXX PF_RULESET_SCRUB as well? */
        if (rs_cnt == PF_RULESET_SCRUB)
            continue;

        if (rs->rules[rs_cnt].inactive.ptr_array)
            free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
        rs->rules[rs_cnt].inactive.ptr_array = NULL;

        if (rs->rules[rs_cnt].inactive.rcount) {
            rs->rules[rs_cnt].inactive.ptr_array =
                malloc(sizeof(void *) *
                rs->rules[rs_cnt].inactive.rcount,
                M_TEMP, M_NOWAIT);

            if (!rs->rules[rs_cnt].inactive.ptr_array)
                return (ENOMEM);
        }

        TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
            entries) {
            pf_hash_rule(&ctx, rule);
            (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
        }
    }

    MD5Final(digest, &ctx);
    memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
    return (0);
}

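/*
 * pfioctl: the main configuration entry point.  Access is checked
 * twice, against kauth(9) for privilege and against the descriptor's
 * open mode (FWRITE) for read-only callers; the consistency lock is
 * then taken exclusive or shared accordingly.
 */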
1252 int
pfioctl(dev_t dev,u_long cmd,void * addr,int flags,struct lwp * l)1253 pfioctl(dev_t dev, u_long cmd, void *addr, int flags, struct lwp *l)
1254 {
1255 struct pf_pooladdr *pa = NULL;
1256 struct pf_pool *pool = NULL;
1257 int s;
1258 int error = 0;
1259
1260 /* XXX keep in sync with switch() below */
1261 if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FIREWALL,
1262 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL, NULL, NULL))
1263 switch (cmd) {
1264 case DIOCGETRULES:
1265 case DIOCGETRULE:
1266 case DIOCGETADDRS:
1267 case DIOCGETADDR:
1268 case DIOCGETSTATE:
1269 case DIOCSETSTATUSIF:
1270 case DIOCGETSTATUS:
1271 case DIOCCLRSTATUS:
1272 case DIOCNATLOOK:
1273 case DIOCSETDEBUG:
1274 case DIOCGETSTATES:
1275 case DIOCGETTIMEOUT:
1276 case DIOCCLRRULECTRS:
1277 case DIOCGETLIMIT:
1278 case DIOCGETALTQS:
1279 case DIOCGETALTQ:
1280 case DIOCGETQSTATS:
1281 case DIOCGETRULESETS:
1282 case DIOCGETRULESET:
1283 case DIOCRGETTABLES:
1284 case DIOCRGETTSTATS:
1285 case DIOCRCLRTSTATS:
1286 case DIOCRCLRADDRS:
1287 case DIOCRADDADDRS:
1288 case DIOCRDELADDRS:
1289 case DIOCRSETADDRS:
1290 case DIOCRGETADDRS:
1291 case DIOCRGETASTATS:
1292 case DIOCRCLRASTATS:
1293 case DIOCRTSTADDRS:
1294 case DIOCOSFPGET:
1295 case DIOCGETSRCNODES:
1296 case DIOCCLRSRCNODES:
1297 case DIOCIGETIFACES:
1298 case DIOCSETIFFLAG:
1299 case DIOCCLRIFFLAG:
1300 case DIOCSETLCK:
1301 case DIOCADDSTATES:
1302 break;
1303 case DIOCRCLRTABLES:
1304 case DIOCRADDTABLES:
1305 case DIOCRDELTABLES:
1306 case DIOCRSETTFLAGS:
1307 if (((struct pfioc_table *)addr)->pfrio_flags &
1308 PFR_FLAG_DUMMY)
1309 break; /* dummy operation ok */
1310 return (EPERM);
1311 default:
1312 return (EPERM);
1313 }
1314
1315 if (!(flags & FWRITE))
1316 switch (cmd) {
1317 case DIOCGETRULES:
1318 case DIOCGETADDRS:
1319 case DIOCGETADDR:
1320 case DIOCGETSTATE:
1321 case DIOCGETSTATUS:
1322 case DIOCGETSTATES:
1323 case DIOCGETTIMEOUT:
1324 case DIOCGETLIMIT:
1325 case DIOCGETALTQS:
1326 case DIOCGETALTQ:
1327 case DIOCGETQSTATS:
1328 case DIOCGETRULESETS:
1329 case DIOCGETRULESET:
1330 case DIOCNATLOOK:
1331 case DIOCRGETTABLES:
1332 case DIOCRGETTSTATS:
1333 case DIOCRGETADDRS:
1334 case DIOCRGETASTATS:
1335 case DIOCRTSTADDRS:
1336 case DIOCOSFPGET:
1337 case DIOCGETSRCNODES:
1338 case DIOCIGETIFACES:
1339 case DIOCSETLCK:
1340 break;
1341 case DIOCRCLRTABLES:
1342 case DIOCRADDTABLES:
1343 case DIOCRDELTABLES:
1344 case DIOCRCLRTSTATS:
1345 case DIOCRCLRADDRS:
1346 case DIOCRADDADDRS:
1347 case DIOCRDELADDRS:
1348 case DIOCRSETADDRS:
1349 case DIOCRSETTFLAGS:
1350 case DIOCADDSTATES:
1351 if (((struct pfioc_table *)addr)->pfrio_flags &
1352 PFR_FLAG_DUMMY) {
1353 flags |= FWRITE; /* need write lock for dummy */
1354 break; /* dummy operation ok */
1355 }
1356 return (EACCES);
1357 case DIOCGETRULE:
1358 if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR)
1359 return (EACCES);
1360 break;
1361 default:
1362 return (EACCES);
1363 }
1364
1365 if (flags & FWRITE)
1366 rw_enter_write(&pf_consistency_lock);
1367 else
1368 rw_enter_read(&pf_consistency_lock);
1369
1370 s = splsoftnet();
1371 switch (cmd) {
1372
1373 case DIOCSTART:
1374 if (pf_status.running)
1375 error = EEXIST;
1376 else {
1377 #ifdef __NetBSD__
1378 error = pf_pfil_attach();
1379 if (error)
1380 break;
1381 #endif /* __NetBSD__ */
1382 pf_status.running = 1;
1383 pf_status.since = time_second;
1384 if (pf_status.stateid == 0) {
1385 pf_status.stateid = time_second;
1386 pf_status.stateid = pf_status.stateid << 32;
1387 }
1388 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1389 }
1390 break;
1391
1392 case DIOCSTOP:
1393 if (!pf_status.running)
1394 error = ENOENT;
1395 else {
1396 #ifdef __NetBSD__
1397 error = pf_pfil_detach();
1398 if (error)
1399 break;
1400 #endif /* __NetBSD__ */
1401 pf_status.running = 0;
1402 pf_status.since = time_second;
1403 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1404 }
1405 break;
1406
1407 case DIOCADDRULE: {
1408 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1409 struct pf_ruleset *ruleset;
1410 struct pf_rule *rule, *tail;
1411 int rs_num;
1412
1413 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1414 ruleset = pf_find_ruleset(pr->anchor);
1415 if (ruleset == NULL) {
1416 error = EINVAL;
1417 break;
1418 }
1419 rs_num = pf_get_ruleset_number(pr->rule.action);
1420 if (rs_num >= PF_RULESET_MAX) {
1421 error = EINVAL;
1422 break;
1423 }
1424 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1425 error = EINVAL;
1426 break;
1427 }
1428 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1429 error = EBUSY;
1430 break;
1431 }
1432 if (pr->pool_ticket != ticket_pabuf) {
1433 error = EBUSY;
1434 break;
1435 }
1436 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1437 if (rule == NULL) {
1438 error = ENOMEM;
1439 break;
1440 }
1441 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1442 #ifdef __NetBSD__
1443 rule->cuid = kauth_cred_getuid(l->l_cred);
1444 rule->cpid = l->l_proc->p_pid;
1445 #else
1446 rule->cuid = p->p_cred->p_ruid;
1447 rule->cpid = p->p_pid;
1448 #endif /* !__NetBSD__ */
1449 rule->anchor = NULL;
1450 rule->kif = NULL;
1451 TAILQ_INIT(&rule->rpool.list);
1452 /* initialize refcounting */
1453 rule->states = 0;
1454 rule->src_nodes = 0;
1455 rule->entries.tqe_prev = NULL;
1456 #ifndef INET
1457 if (rule->af == AF_INET) {
1458 pool_put(&pf_rule_pl, rule);
1459 error = EAFNOSUPPORT;
1460 break;
1461 }
1462 #endif /* INET */
1463 #ifndef INET6
1464 if (rule->af == AF_INET6) {
1465 pool_put(&pf_rule_pl, rule);
1466 error = EAFNOSUPPORT;
1467 break;
1468 }
1469 #endif /* INET6 */
1470 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1471 pf_rulequeue);
1472 if (tail)
1473 rule->nr = tail->nr + 1;
1474 else
1475 rule->nr = 0;
1476 if (rule->ifname[0]) {
1477 rule->kif = pfi_kif_get(rule->ifname);
1478 if (rule->kif == NULL) {
1479 pool_put(&pf_rule_pl, rule);
1480 error = EINVAL;
1481 break;
1482 }
1483 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1484 }
1485
1486 #ifndef __NetBSD__
1487 if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
1488 error = EBUSY;
1489 #endif /* !__NetBSD__ */
1490
1491 #ifdef ALTQ
1492 /* set queue IDs */
1493 if (rule->qname[0] != 0) {
1494 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1495 error = EBUSY;
1496 else if (rule->pqname[0] != 0) {
1497 if ((rule->pqid =
1498 pf_qname2qid(rule->pqname)) == 0)
1499 error = EBUSY;
1500 } else
1501 rule->pqid = rule->qid;
1502 }
1503 #endif
1504 if (rule->tagname[0])
1505 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1506 error = EBUSY;
1507 if (rule->match_tagname[0])
1508 if ((rule->match_tag =
1509 pf_tagname2tag(rule->match_tagname)) == 0)
1510 error = EBUSY;
1511 if (rule->rt && !rule->direction)
1512 error = EINVAL;
1513 #if NPFLOG > 0
1514 if (!rule->log)
1515 rule->logif = 0;
1516 if (rule->logif >= PFLOGIFS_MAX)
1517 error = EINVAL;
1518 #endif
1519 if (pf_rtlabel_add(&rule->src.addr) ||
1520 pf_rtlabel_add(&rule->dst.addr))
1521 error = EBUSY;
1522 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1523 error = EINVAL;
1524 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1525 error = EINVAL;
1526 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1527 error = EINVAL;
1528 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1529 error = EINVAL;
1530 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1531 error = EINVAL;
1532 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1533 if (pf_tbladdr_setup(ruleset, &pa->addr))
1534 error = EINVAL;
1535
1536 rule->overload_tbl = NULL;
1537 if (rule->overload_tblname[0]) {
1538 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1539 rule->overload_tblname)) == NULL)
1540 error = EINVAL;
1541 else
1542 rule->overload_tbl->pfrkt_flags |=
1543 PFR_TFLAG_ACTIVE;
1544 }
1545
1546 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1547 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1548 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1549 (rule->rt > PF_FASTROUTE)) &&
1550 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1551 error = EINVAL;
1552
1553 if (error) {
1554 pf_rm_rule(NULL, rule);
1555 break;
1556 }
1557 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1558 rule->evaluations = rule->packets[0] = rule->packets[1] =
1559 rule->bytes[0] = rule->bytes[1] = 0;
1560 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1561 rule, entries);
1562 ruleset->rules[rs_num].inactive.rcount++;
1563 break;
1564 }
1565
1566 case DIOCGETRULES: {
1567 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1568 struct pf_ruleset *ruleset;
1569 struct pf_rule *tail;
1570 int rs_num;
1571
1572 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1573 ruleset = pf_find_ruleset(pr->anchor);
1574 if (ruleset == NULL) {
1575 error = EINVAL;
1576 break;
1577 }
1578 rs_num = pf_get_ruleset_number(pr->rule.action);
1579 if (rs_num >= PF_RULESET_MAX) {
1580 error = EINVAL;
1581 break;
1582 }
1583 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1584 pf_rulequeue);
1585 if (tail)
1586 pr->nr = tail->nr + 1;
1587 else
1588 pr->nr = 0;
1589 pr->ticket = ruleset->rules[rs_num].active.ticket;
1590 break;
1591 }
1592
1593 case DIOCGETRULE: {
1594 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1595 struct pf_ruleset *ruleset;
1596 struct pf_rule *rule;
1597 int rs_num, i;
1598
1599 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1600 ruleset = pf_find_ruleset(pr->anchor);
1601 if (ruleset == NULL) {
1602 error = EINVAL;
1603 break;
1604 }
1605 rs_num = pf_get_ruleset_number(pr->rule.action);
1606 if (rs_num >= PF_RULESET_MAX) {
1607 error = EINVAL;
1608 break;
1609 }
1610 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1611 error = EBUSY;
1612 break;
1613 }
1614 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1615 while ((rule != NULL) && (rule->nr != pr->nr))
1616 rule = TAILQ_NEXT(rule, entries);
1617 if (rule == NULL) {
1618 error = EBUSY;
1619 break;
1620 }
1621 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1622 if (pf_anchor_copyout(ruleset, rule, pr)) {
1623 error = EBUSY;
1624 break;
1625 }
1626 pfi_dynaddr_copyout(&pr->rule.src.addr);
1627 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1628 pf_tbladdr_copyout(&pr->rule.src.addr);
1629 pf_tbladdr_copyout(&pr->rule.dst.addr);
1630 pf_rtlabel_copyout(&pr->rule.src.addr);
1631 pf_rtlabel_copyout(&pr->rule.dst.addr);
1632 for (i = 0; i < PF_SKIP_COUNT; ++i)
1633 if (rule->skip[i].ptr == NULL)
1634 pr->rule.skip[i].nr = -1;
1635 else
1636 pr->rule.skip[i].nr =
1637 rule->skip[i].ptr->nr;
1638
1639 if (pr->action == PF_GET_CLR_CNTR) {
1640 rule->evaluations = 0;
1641 rule->packets[0] = rule->packets[1] = 0;
1642 rule->bytes[0] = rule->bytes[1] = 0;
1643 }
1644 break;
1645 }
1646
1647 case DIOCCHANGERULE: {
1648 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1649 struct pf_ruleset *ruleset;
1650 struct pf_rule *oldrule = NULL, *newrule = NULL;
1651 u_int32_t nr = 0;
1652 int rs_num;
1653
1654 if (!(pcr->action == PF_CHANGE_REMOVE ||
1655 pcr->action == PF_CHANGE_GET_TICKET) &&
1656 pcr->pool_ticket != ticket_pabuf) {
1657 error = EBUSY;
1658 break;
1659 }
1660
1661 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1662 pcr->action > PF_CHANGE_GET_TICKET) {
1663 error = EINVAL;
1664 break;
1665 }
1666 ruleset = pf_find_ruleset(pcr->anchor);
1667 if (ruleset == NULL) {
1668 error = EINVAL;
1669 break;
1670 }
1671 rs_num = pf_get_ruleset_number(pcr->rule.action);
1672 if (rs_num >= PF_RULESET_MAX) {
1673 error = EINVAL;
1674 break;
1675 }
1676
1677 if (pcr->action == PF_CHANGE_GET_TICKET) {
1678 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1679 break;
1680 } else {
1681 if (pcr->ticket !=
1682 ruleset->rules[rs_num].active.ticket) {
1683 error = EINVAL;
1684 break;
1685 }
1686 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1687 error = EINVAL;
1688 break;
1689 }
1690 }
1691
1692 if (pcr->action != PF_CHANGE_REMOVE) {
1693 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1694 if (newrule == NULL) {
1695 error = ENOMEM;
1696 break;
1697 }
1698 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1699 #ifdef __NetBSD__
1700 newrule->cuid = kauth_cred_getuid(l->l_cred);
1701 newrule->cpid = l->l_proc->p_pid;
1702 #else
1703 newrule->cuid = p->p_cred->p_ruid;
1704 newrule->cpid = p->p_pid;
1705 #endif /* !__NetBSD__ */
1706 TAILQ_INIT(&newrule->rpool.list);
1707 /* initialize refcounting */
1708 newrule->states = 0;
1709 newrule->entries.tqe_prev = NULL;
1710 #ifndef INET
1711 if (newrule->af == AF_INET) {
1712 pool_put(&pf_rule_pl, newrule);
1713 error = EAFNOSUPPORT;
1714 break;
1715 }
1716 #endif /* INET */
1717 #ifndef INET6
1718 if (newrule->af == AF_INET6) {
1719 pool_put(&pf_rule_pl, newrule);
1720 error = EAFNOSUPPORT;
1721 break;
1722 }
1723 #endif /* INET6 */
1724 if (newrule->ifname[0]) {
1725 newrule->kif = pfi_kif_get(newrule->ifname);
1726 if (newrule->kif == NULL) {
1727 pool_put(&pf_rule_pl, newrule);
1728 error = EINVAL;
1729 break;
1730 }
1731 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
1732 } else
1733 newrule->kif = NULL;
1734
1735 #ifndef __NetBSD__
1736 if (newrule->rtableid > 0 &&
1737 !rtable_exists(newrule->rtableid))
1738 error = EBUSY;
1739 #endif /* !__NetBSD__ */
1740
1741 #ifdef ALTQ
1742 /* set queue IDs */
1743 if (newrule->qname[0] != 0) {
1744 if ((newrule->qid =
1745 pf_qname2qid(newrule->qname)) == 0)
1746 error = EBUSY;
1747 else if (newrule->pqname[0] != 0) {
1748 if ((newrule->pqid =
1749 pf_qname2qid(newrule->pqname)) == 0)
1750 error = EBUSY;
1751 } else
1752 newrule->pqid = newrule->qid;
1753 }
1754 #endif /* ALTQ */
1755 if (newrule->tagname[0])
1756 if ((newrule->tag =
1757 pf_tagname2tag(newrule->tagname)) == 0)
1758 error = EBUSY;
1759 if (newrule->match_tagname[0])
1760 if ((newrule->match_tag = pf_tagname2tag(
1761 newrule->match_tagname)) == 0)
1762 error = EBUSY;
1763 if (newrule->rt && !newrule->direction)
1764 error = EINVAL;
1765 #if NPFLOG > 0
1766 if (!newrule->log)
1767 newrule->logif = 0;
1768 if (newrule->logif >= PFLOGIFS_MAX)
1769 error = EINVAL;
1770 #endif
1771 if (pf_rtlabel_add(&newrule->src.addr) ||
1772 pf_rtlabel_add(&newrule->dst.addr))
1773 error = EBUSY;
1774 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1775 error = EINVAL;
1776 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1777 error = EINVAL;
1778 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1779 error = EINVAL;
1780 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1781 error = EINVAL;
1782 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1783 error = EINVAL;
1784 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1785 if (pf_tbladdr_setup(ruleset, &pa->addr))
1786 error = EINVAL;
1787
1788 newrule->overload_tbl = NULL;
1789 if (newrule->overload_tblname[0]) {
1790 if ((newrule->overload_tbl = pfr_attach_table(
1791 ruleset, newrule->overload_tblname)) ==
1792 NULL)
1793 error = EINVAL;
1794 else
1795 newrule->overload_tbl->pfrkt_flags |=
1796 PFR_TFLAG_ACTIVE;
1797 }
1798
1799 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1800 if (((((newrule->action == PF_NAT) ||
1801 (newrule->action == PF_RDR) ||
1802 (newrule->action == PF_BINAT) ||
1803 (newrule->rt > PF_FASTROUTE)) &&
1804 !newrule->anchor)) &&
1805 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1806 error = EINVAL;
1807
1808 if (error) {
1809 pf_rm_rule(NULL, newrule);
1810 break;
1811 }
1812 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1813 newrule->evaluations = 0;
1814 newrule->packets[0] = newrule->packets[1] = 0;
1815 newrule->bytes[0] = newrule->bytes[1] = 0;
1816 }
1817 pf_empty_pool(&pf_pabuf);
1818
1819 if (pcr->action == PF_CHANGE_ADD_HEAD)
1820 oldrule = TAILQ_FIRST(
1821 ruleset->rules[rs_num].active.ptr);
1822 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1823 oldrule = TAILQ_LAST(
1824 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1825 else {
1826 oldrule = TAILQ_FIRST(
1827 ruleset->rules[rs_num].active.ptr);
1828 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1829 oldrule = TAILQ_NEXT(oldrule, entries);
1830 if (oldrule == NULL) {
1831 if (newrule != NULL)
1832 pf_rm_rule(NULL, newrule);
1833 error = EINVAL;
1834 break;
1835 }
1836 }
1837
1838 if (pcr->action == PF_CHANGE_REMOVE) {
1839 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1840 ruleset->rules[rs_num].active.rcount--;
1841 } else {
1842 if (oldrule == NULL)
1843 TAILQ_INSERT_TAIL(
1844 ruleset->rules[rs_num].active.ptr,
1845 newrule, entries);
1846 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1847 pcr->action == PF_CHANGE_ADD_BEFORE)
1848 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1849 else
1850 TAILQ_INSERT_AFTER(
1851 ruleset->rules[rs_num].active.ptr,
1852 oldrule, newrule, entries);
1853 ruleset->rules[rs_num].active.rcount++;
1854 }
1855
1856 nr = 0;
1857 TAILQ_FOREACH(oldrule,
1858 ruleset->rules[rs_num].active.ptr, entries)
1859 oldrule->nr = nr++;
1860
1861 ruleset->rules[rs_num].active.ticket++;
1862
1863 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1864 pf_remove_if_empty_ruleset(ruleset);
1865
1866 break;
1867 }
1868
1869 case DIOCCLRSTATES: {
1870 struct pf_state *ps, *nexts;
1871 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1872 int killed = 0;
1873
1874 for (ps = RB_MIN(pf_state_tree_id, &tree_id); ps; ps = nexts) {
1875 nexts = RB_NEXT(pf_state_tree_id, &tree_id, ps);
1876
1877 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1878 ps->kif->pfik_name)) {
1879 #if NPFSYNC
1880 /* don't send out individual delete messages */
1881 ps->sync_flags = PFSTATE_NOSYNC;
1882 #endif
1883 pf_unlink_state(ps);
1884 killed++;
1885 }
1886 }
1887 psk->psk_af = killed;
1888 #if NPFSYNC
1889 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1890 #endif
1891 break;
1892 }
1893
1894 case DIOCKILLSTATES: {
1895 struct pf_state *ps, *nexts;
1896 struct pf_state_key *sk;
1897 struct pf_state_host *src, *dst;
1898 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1899 int killed = 0;
1900
1901 for (ps = RB_MIN(pf_state_tree_id, &tree_id); ps;
1902 ps = nexts) {
1903 nexts = RB_NEXT(pf_state_tree_id, &tree_id, ps);
1904 sk = ps->state_key;
1905
1906 if (sk->direction == PF_OUT) {
1907 src = &sk->lan;
1908 dst = &sk->ext;
1909 } else {
1910 src = &sk->ext;
1911 dst = &sk->lan;
1912 }
1913 if ((!psk->psk_af || sk->af == psk->psk_af)
1914 && (!psk->psk_proto || psk->psk_proto ==
1915 sk->proto) &&
1916 PF_MATCHA(psk->psk_src.neg,
1917 &psk->psk_src.addr.v.a.addr,
1918 &psk->psk_src.addr.v.a.mask,
1919 &src->addr, sk->af) &&
1920 PF_MATCHA(psk->psk_dst.neg,
1921 &psk->psk_dst.addr.v.a.addr,
1922 &psk->psk_dst.addr.v.a.mask,
1923 &dst->addr, sk->af) &&
1924 (psk->psk_src.port_op == 0 ||
1925 pf_match_port(psk->psk_src.port_op,
1926 psk->psk_src.port[0], psk->psk_src.port[1],
1927 src->port)) &&
1928 (psk->psk_dst.port_op == 0 ||
1929 pf_match_port(psk->psk_dst.port_op,
1930 psk->psk_dst.port[0], psk->psk_dst.port[1],
1931 dst->port)) &&
1932 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1933 ps->kif->pfik_name))) {
1934 #if NPFSYNC > 0
1935 /* send immediate delete of state */
1936 pfsync_delete_state(ps);
1937 ps->sync_flags |= PFSTATE_NOSYNC;
1938 #endif
1939 pf_unlink_state(ps);
1940 killed++;
1941 }
1942 }
1943 psk->psk_af = killed;
1944 break;
1945 }
1946
1947 case DIOCADDSTATE: {
1948 struct pfioc_state *ps = (struct pfioc_state *)addr;
1949 struct pfsync_state *sp = (struct pfsync_state *)ps->state;
1950
1951 error = pf_state_add(sp);
1952 break;
1953 }
1954
1955 case DIOCADDSTATES: {
1956 struct pfioc_states *ps = (struct pfioc_states *)addr;
1957 struct pfsync_state *p = (struct pfsync_state *) ps->ps_states;
1958 struct pfsync_state *pk;
1959 int size = ps->ps_len;
1960 int i = 0;
1961 error = 0;
1962
1963 		pk = malloc(sizeof(*pk), M_TEMP, M_WAITOK);
1964
1965 		while (error == 0 && i < size) {
1966 			if (copyin(p, pk, sizeof(struct pfsync_state))) {
1967 				/*
1968 				 * Don't free pk here; it is freed exactly
1969 				 * once after the loop.
1970 				 */
1971 				error = EFAULT;
1972 			} else {
1973 				error = pf_state_add(pk);
1974 				i += sizeof(*p);
1975 				p++;
1976 			}
1977 		}
1978 		free(pk, M_TEMP);
1979 break;
1980 }
1981
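	/*
	 * DIOCGETSTATE: copy out the ps->nr'th state in the id tree,
	 * exported in pfsync wire format.
	 */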
1983 case DIOCGETSTATE: {
1984 struct pfioc_state *ps = (struct pfioc_state *)addr;
1985 struct pf_state *pfs;
1986 u_int32_t nr;
1987
1988 nr = 0;
1989 RB_FOREACH(pfs, pf_state_tree_id, &tree_id) {
1990 if (nr >= ps->nr)
1991 break;
1992 nr++;
1993 }
1994 if (pfs == NULL) {
1995 error = EBUSY;
1996 break;
1997 }
1998
1999 pf_state_export((struct pfsync_state *)&ps->state,
2000 pfs->state_key, pfs);
2001 break;
2002 }
2003
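	/*
	 * DIOCGETSTATES: dump all states.  With ps_len == 0 only the
	 * required buffer size is reported; otherwise states are
	 * exported until the user buffer is full.
	 */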
2004 case DIOCGETSTATES: {
2005 struct pfioc_states *ps = (struct pfioc_states *)addr;
2006 struct pf_state *state;
2007 struct pfsync_state *p, *pstore;
2008 u_int32_t nr = 0;
2009
2010 if (ps->ps_len == 0) {
2011 nr = pf_status.states;
2012 ps->ps_len = sizeof(struct pfsync_state) * nr;
2013 break;
2014 }
2015
2016 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2017
2018 p = ps->ps_states;
2019
2020 state = TAILQ_FIRST(&state_list);
2021 while (state) {
2022 if (state->timeout != PFTM_UNLINKED) {
2023 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
2024 break;
2025
2026 pf_state_export(pstore,
2027 state->state_key, state);
2028 error = copyout(pstore, p, sizeof(*p));
2029 if (error) {
2030 free(pstore, M_TEMP);
2031 goto fail;
2032 }
2033 p++;
2034 nr++;
2035 }
2036 state = TAILQ_NEXT(state, entry_list);
2037 }
2038
2039 ps->ps_len = sizeof(struct pfsync_state) * nr;
2040
2041 free(pstore, M_TEMP);
2042 break;
2043 }
2044
2045 case DIOCGETSTATUS: {
2046 struct pf_status *ps = (struct pf_status *)addr;
2047 bcopy(&pf_status, ps, sizeof(struct pf_status));
2048 pfi_fill_oldstatus(ps);
2049 break;
2050 }
2051
2052 case DIOCSETSTATUSIF: {
2053 struct pfioc_if *pi = (struct pfioc_if *)addr;
2054
2055 if (pi->ifname[0] == 0) {
2056 bzero(pf_status.ifname, IFNAMSIZ);
2057 break;
2058 }
2059 if (ifunit(pi->ifname) == NULL) {
2060 error = EINVAL;
2061 break;
2062 }
2063 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
2064 break;
2065 }
2066
2067 case DIOCCLRSTATUS: {
2068 bzero(pf_status.counters, sizeof(pf_status.counters));
2069 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
2070 bzero(pf_status.scounters, sizeof(pf_status.scounters));
2071 pf_status.since = time_second;
2072 if (*pf_status.ifname)
2073 pfi_clr_istats(pf_status.ifname);
2074 break;
2075 }
2076
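	/*
	 * DIOCNATLOOK: find the NAT mapping for a connection by
	 * looking up the state that matches its reply direction.
	 */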
2077 case DIOCNATLOOK: {
2078 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
2079 struct pf_state_key *sk;
2080 struct pf_state *state;
2081 struct pf_state_key_cmp key;
2082 int m = 0, direction = pnl->direction;
2083
2084 key.af = pnl->af;
2085 key.proto = pnl->proto;
2086
2087 if (!pnl->proto ||
2088 PF_AZERO(&pnl->saddr, pnl->af) ||
2089 PF_AZERO(&pnl->daddr, pnl->af) ||
2090 ((pnl->proto == IPPROTO_TCP ||
2091 pnl->proto == IPPROTO_UDP) &&
2092 (!pnl->dport || !pnl->sport)))
2093 error = EINVAL;
2094 else {
2095 /*
2096 * userland gives us source and dest of connection,
2097 * reverse the lookup so we ask for what happens with
2098 * the return traffic, enabling us to find it in the
2099 * state tree.
2100 */
2101 if (direction == PF_IN) {
2102 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
2103 key.ext.port = pnl->dport;
2104 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
2105 key.gwy.port = pnl->sport;
2106 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
2107 } else {
2108 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
2109 key.lan.port = pnl->dport;
2110 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
2111 key.ext.port = pnl->sport;
2112 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
2113 }
2114 if (m > 1)
2115 error = E2BIG; /* more than one state */
2116 else if (state != NULL) {
2117 sk = state->state_key;
2118 if (direction == PF_IN) {
2119 PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
2120 sk->af);
2121 pnl->rsport = sk->lan.port;
2122 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
2123 pnl->af);
2124 pnl->rdport = pnl->dport;
2125 } else {
2126 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
2127 sk->af);
2128 pnl->rdport = sk->gwy.port;
2129 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
2130 pnl->af);
2131 pnl->rsport = pnl->sport;
2132 }
2133 } else
2134 error = ENOENT;
2135 }
2136 break;
2137 }
2138
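	/*
	 * DIOCSETTIMEOUT: set a default timeout value; the previous
	 * value is handed back in pt->seconds.  Shortening
	 * PFTM_INTERVAL wakes the purge thread immediately.
	 */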
2139 case DIOCSETTIMEOUT: {
2140 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2141 int old;
2142
2143 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2144 pt->seconds < 0) {
2145 error = EINVAL;
2146 goto fail;
2147 }
2148 old = pf_default_rule.timeout[pt->timeout];
2149 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
2150 pt->seconds = 1;
2151 pf_default_rule.timeout[pt->timeout] = pt->seconds;
2152 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
2153 wakeup(pf_purge_thread);
2154 pt->seconds = old;
2155 break;
2156 }
2157
2158 case DIOCGETTIMEOUT: {
2159 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2160
2161 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2162 error = EINVAL;
2163 goto fail;
2164 }
2165 pt->seconds = pf_default_rule.timeout[pt->timeout];
2166 break;
2167 }
2168
2169 case DIOCGETLIMIT: {
2170 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2171
2172 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2173 error = EINVAL;
2174 goto fail;
2175 }
2176 pl->limit = pf_pool_limits[pl->index].limit;
2177 break;
2178 }
2179
2180 case DIOCSETLIMIT: {
2181 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2182 int old_limit;
2183
2184 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2185 pf_pool_limits[pl->index].pp == NULL) {
2186 error = EINVAL;
2187 goto fail;
2188 }
2189 #ifdef __NetBSD__
2190 pool_sethardlimit(pf_pool_limits[pl->index].pp,
2191 pl->limit, NULL, 0);
2192 #else
2193 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2194 pl->limit, NULL, 0) != 0) {
2195 error = EBUSY;
2196 goto fail;
2197 }
2198 #endif /* !__NetBSD__ */
2199 old_limit = pf_pool_limits[pl->index].limit;
2200 pf_pool_limits[pl->index].limit = pl->limit;
2201 pl->limit = old_limit;
2202 break;
2203 }
2204
2205 case DIOCSETDEBUG: {
2206 u_int32_t *level = (u_int32_t *)addr;
2207
2208 pf_status.debug = *level;
2209 break;
2210 }
2211
2212 case DIOCCLRRULECTRS: {
2213 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
2214 struct pf_ruleset *ruleset = &pf_main_ruleset;
2215 struct pf_rule *rule;
2216
2217 TAILQ_FOREACH(rule,
2218 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
2219 rule->evaluations = 0;
2220 rule->packets[0] = rule->packets[1] = 0;
2221 rule->bytes[0] = rule->bytes[1] = 0;
2222 }
2223 break;
2224 }
2225
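	/*
	 * ALTQ ioctls: start/stop queueing and manage the queue
	 * definitions on the active and inactive lists.
	 */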
2226 #ifdef ALTQ
2227 case DIOCSTARTALTQ: {
2228 struct pf_altq *altq;
2229
2230 /* enable all altq interfaces on active list */
2231 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2232 if (altq->qname[0] == 0) {
2233 error = pf_enable_altq(altq);
2234 if (error != 0)
2235 break;
2236 }
2237 }
2238 if (error == 0)
2239 pf_altq_running = 1;
2240 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2241 break;
2242 }
2243
2244 case DIOCSTOPALTQ: {
2245 struct pf_altq *altq;
2246
2247 /* disable all altq interfaces on active list */
2248 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2249 if (altq->qname[0] == 0) {
2250 error = pf_disable_altq(altq);
2251 if (error != 0)
2252 break;
2253 }
2254 }
2255 if (error == 0)
2256 pf_altq_running = 0;
2257 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2258 break;
2259 }
2260
2261 case DIOCADDALTQ: {
2262 struct pfioc_altq *paa = (struct pfioc_altq *)addr;
2263 struct pf_altq *altq, *a;
2264
2265 if (paa->ticket != ticket_altqs_inactive) {
2266 error = EBUSY;
2267 break;
2268 }
2269 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2270 if (altq == NULL) {
2271 error = ENOMEM;
2272 break;
2273 }
2274 bcopy(&paa->altq, altq, sizeof(struct pf_altq));
2275
2276 /*
2277 * if this is for a queue, find the discipline and
2278 * copy the necessary fields
2279 */
2280 if (altq->qname[0] != 0) {
2281 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2282 error = EBUSY;
2283 pool_put(&pf_altq_pl, altq);
2284 break;
2285 }
2286 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2287 if (strncmp(a->ifname, altq->ifname,
2288 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2289 altq->altq_disc = a->altq_disc;
2290 break;
2291 }
2292 }
2293 }
2294
2295 error = altq_add(altq);
2296 if (error) {
2297 pool_put(&pf_altq_pl, altq);
2298 break;
2299 }
2300
2301 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2302 bcopy(altq, &paa->altq, sizeof(struct pf_altq));
2303 break;
2304 }
2305
2306 case DIOCGETALTQS: {
2307 struct pfioc_altq *paa = (struct pfioc_altq *)addr;
2308 struct pf_altq *altq;
2309
2310 paa->nr = 0;
2311 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2312 paa->nr++;
2313 paa->ticket = ticket_altqs_active;
2314 break;
2315 }
2316
2317 case DIOCGETALTQ: {
2318 struct pfioc_altq *paa = (struct pfioc_altq *)addr;
2319 struct pf_altq *altq;
2320 u_int32_t nr;
2321
2322 if (paa->ticket != ticket_altqs_active) {
2323 error = EBUSY;
2324 break;
2325 }
2326 nr = 0;
2327 altq = TAILQ_FIRST(pf_altqs_active);
2328 while ((altq != NULL) && (nr < paa->nr)) {
2329 altq = TAILQ_NEXT(altq, entries);
2330 nr++;
2331 }
2332 if (altq == NULL) {
2333 error = EBUSY;
2334 break;
2335 }
2336 bcopy(altq, &paa->altq, sizeof(struct pf_altq));
2337 break;
2338 }
2339
2340 case DIOCCHANGEALTQ:
2341 /* CHANGEALTQ not supported yet! */
2342 error = ENODEV;
2343 break;
2344
2345 case DIOCGETQSTATS: {
2346 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2347 struct pf_altq *altq;
2348 u_int32_t nr;
2349 int nbytes;
2350
2351 if (pq->ticket != ticket_altqs_active) {
2352 error = EBUSY;
2353 break;
2354 }
2355 nbytes = pq->nbytes;
2356 nr = 0;
2357 altq = TAILQ_FIRST(pf_altqs_active);
2358 while ((altq != NULL) && (nr < pq->nr)) {
2359 altq = TAILQ_NEXT(altq, entries);
2360 nr++;
2361 }
2362 if (altq == NULL) {
2363 error = EBUSY;
2364 break;
2365 }
2366 error = altq_getqstats(altq, pq->buf, &nbytes);
2367 if (error == 0) {
2368 pq->scheduler = altq->scheduler;
2369 pq->nbytes = nbytes;
2370 }
2371 break;
2372 }
2373 #endif /* ALTQ */
2374
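	/*
	 * Pool address ioctls: DIOCBEGINADDRS empties the scratch
	 * buffer and issues the ticket that DIOCADDADDR must present.
	 */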
2375 case DIOCBEGINADDRS: {
2376 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2377
2378 pf_empty_pool(&pf_pabuf);
2379 pp->ticket = ++ticket_pabuf;
2380 break;
2381 }
2382
2383 case DIOCADDADDR: {
2384 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2385
2386 if (pp->ticket != ticket_pabuf) {
2387 error = EBUSY;
2388 break;
2389 }
2390 #ifndef INET
2391 if (pp->af == AF_INET) {
2392 error = EAFNOSUPPORT;
2393 break;
2394 }
2395 #endif /* INET */
2396 #ifndef INET6
2397 if (pp->af == AF_INET6) {
2398 error = EAFNOSUPPORT;
2399 break;
2400 }
2401 #endif /* INET6 */
2402 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2403 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2404 pp->addr.addr.type != PF_ADDR_TABLE) {
2405 error = EINVAL;
2406 break;
2407 }
2408 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2409 if (pa == NULL) {
2410 error = ENOMEM;
2411 break;
2412 }
2413 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2414 if (pa->ifname[0]) {
2415 pa->kif = pfi_kif_get(pa->ifname);
2416 if (pa->kif == NULL) {
2417 pool_put(&pf_pooladdr_pl, pa);
2418 error = EINVAL;
2419 break;
2420 }
2421 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2422 }
2423 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2424 pfi_dynaddr_remove(&pa->addr);
2425 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2426 pool_put(&pf_pooladdr_pl, pa);
2427 error = EINVAL;
2428 break;
2429 }
2430 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2431 break;
2432 }
2433
2434 case DIOCGETADDRS: {
2435 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2436
2437 pp->nr = 0;
2438 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2439 pp->r_num, 0, 1, 0);
2440 if (pool == NULL) {
2441 error = EBUSY;
2442 break;
2443 }
2444 TAILQ_FOREACH(pa, &pool->list, entries)
2445 pp->nr++;
2446 break;
2447 }
2448
2449 case DIOCGETADDR: {
2450 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2451 u_int32_t nr = 0;
2452
2453 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2454 pp->r_num, 0, 1, 1);
2455 if (pool == NULL) {
2456 error = EBUSY;
2457 break;
2458 }
2459 pa = TAILQ_FIRST(&pool->list);
2460 while ((pa != NULL) && (nr < pp->nr)) {
2461 pa = TAILQ_NEXT(pa, entries);
2462 nr++;
2463 }
2464 if (pa == NULL) {
2465 error = EBUSY;
2466 break;
2467 }
2468 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2469 pfi_dynaddr_copyout(&pp->addr.addr);
2470 pf_tbladdr_copyout(&pp->addr.addr);
2471 pf_rtlabel_copyout(&pp->addr.addr);
2472 break;
2473 }
2474
2475 case DIOCCHANGEADDR: {
2476 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2477 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2478 struct pf_ruleset *ruleset;
2479
2480 if (pca->action < PF_CHANGE_ADD_HEAD ||
2481 pca->action > PF_CHANGE_REMOVE) {
2482 error = EINVAL;
2483 break;
2484 }
2485 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2486 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2487 pca->addr.addr.type != PF_ADDR_TABLE) {
2488 error = EINVAL;
2489 break;
2490 }
2491
2492 ruleset = pf_find_ruleset(pca->anchor);
2493 if (ruleset == NULL) {
2494 error = EBUSY;
2495 break;
2496 }
2497 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2498 pca->r_num, pca->r_last, 1, 1);
2499 if (pool == NULL) {
2500 error = EBUSY;
2501 break;
2502 }
2503 if (pca->action != PF_CHANGE_REMOVE) {
2504 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2505 if (newpa == NULL) {
2506 error = ENOMEM;
2507 break;
2508 }
2509 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2510 #ifndef INET
2511 if (pca->af == AF_INET) {
2512 pool_put(&pf_pooladdr_pl, newpa);
2513 error = EAFNOSUPPORT;
2514 break;
2515 }
2516 #endif /* INET */
2517 #ifndef INET6
2518 if (pca->af == AF_INET6) {
2519 pool_put(&pf_pooladdr_pl, newpa);
2520 error = EAFNOSUPPORT;
2521 break;
2522 }
2523 #endif /* INET6 */
2524 if (newpa->ifname[0]) {
2525 newpa->kif = pfi_kif_get(newpa->ifname);
2526 if (newpa->kif == NULL) {
2527 pool_put(&pf_pooladdr_pl, newpa);
2528 error = EINVAL;
2529 break;
2530 }
2531 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2532 } else
2533 newpa->kif = NULL;
2534 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2535 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2536 pfi_dynaddr_remove(&newpa->addr);
2537 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2538 pool_put(&pf_pooladdr_pl, newpa);
2539 error = EINVAL;
2540 break;
2541 }
2542 }
2543
2544 if (pca->action == PF_CHANGE_ADD_HEAD)
2545 oldpa = TAILQ_FIRST(&pool->list);
2546 else if (pca->action == PF_CHANGE_ADD_TAIL)
2547 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2548 else {
2549 int i = 0;
2550
2551 oldpa = TAILQ_FIRST(&pool->list);
2552 while ((oldpa != NULL) && (i < pca->nr)) {
2553 oldpa = TAILQ_NEXT(oldpa, entries);
2554 i++;
2555 }
2556 if (oldpa == NULL) {
2557 error = EINVAL;
2558 break;
2559 }
2560 }
2561
2562 if (pca->action == PF_CHANGE_REMOVE) {
2563 TAILQ_REMOVE(&pool->list, oldpa, entries);
2564 pfi_dynaddr_remove(&oldpa->addr);
2565 pf_tbladdr_remove(&oldpa->addr);
2566 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2567 pool_put(&pf_pooladdr_pl, oldpa);
2568 } else {
2569 if (oldpa == NULL)
2570 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2571 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2572 pca->action == PF_CHANGE_ADD_BEFORE)
2573 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2574 else
2575 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2576 newpa, entries);
2577 }
2578
2579 pool->cur = TAILQ_FIRST(&pool->list);
2580 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2581 pca->af);
2582 break;
2583 }
2584
2585 case DIOCGETRULESETS: {
2586 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2587 struct pf_ruleset *ruleset;
2588 struct pf_anchor *anchor;
2589
2590 pr->path[sizeof(pr->path) - 1] = 0;
2591 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2592 error = EINVAL;
2593 break;
2594 }
2595 pr->nr = 0;
2596 if (ruleset->anchor == NULL) {
2597 /* XXX kludge for pf_main_ruleset */
2598 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2599 if (anchor->parent == NULL)
2600 pr->nr++;
2601 } else {
2602 RB_FOREACH(anchor, pf_anchor_node,
2603 &ruleset->anchor->children)
2604 pr->nr++;
2605 }
2606 break;
2607 }
2608
2609 case DIOCGETRULESET: {
2610 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2611 struct pf_ruleset *ruleset;
2612 struct pf_anchor *anchor;
2613 u_int32_t nr = 0;
2614
2615 pr->path[sizeof(pr->path) - 1] = 0;
2616 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2617 error = EINVAL;
2618 break;
2619 }
2620 pr->name[0] = 0;
2621 if (ruleset->anchor == NULL) {
2622 /* XXX kludge for pf_main_ruleset */
2623 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2624 if (anchor->parent == NULL && nr++ == pr->nr) {
2625 strlcpy(pr->name, anchor->name,
2626 sizeof(pr->name));
2627 break;
2628 }
2629 } else {
2630 RB_FOREACH(anchor, pf_anchor_node,
2631 &ruleset->anchor->children)
2632 if (nr++ == pr->nr) {
2633 strlcpy(pr->name, anchor->name,
2634 sizeof(pr->name));
2635 break;
2636 }
2637 }
2638 if (!pr->name[0])
2639 error = EBUSY;
2640 break;
2641 }
2642
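	/*
	 * DIOCR* table ioctls: pfrio_esize must match the element
	 * size the kernel was built with, guarding against ABI
	 * mismatches with userland.
	 */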
2643 case DIOCRCLRTABLES: {
2644 struct pfioc_table *io = (struct pfioc_table *)addr;
2645
2646 if (io->pfrio_esize != 0) {
2647 error = ENODEV;
2648 break;
2649 }
2650 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2651 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2652 break;
2653 }
2654
2655 case DIOCRADDTABLES: {
2656 struct pfioc_table *io = (struct pfioc_table *)addr;
2657
2658 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2659 error = ENODEV;
2660 break;
2661 }
2662 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2663 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2664 break;
2665 }
2666
2667 case DIOCRDELTABLES: {
2668 struct pfioc_table *io = (struct pfioc_table *)addr;
2669
2670 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2671 error = ENODEV;
2672 break;
2673 }
2674 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2675 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2676 break;
2677 }
2678
2679 case DIOCRGETTABLES: {
2680 struct pfioc_table *io = (struct pfioc_table *)addr;
2681
2682 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2683 error = ENODEV;
2684 break;
2685 }
2686 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2687 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2688 break;
2689 }
2690
2691 case DIOCRGETTSTATS: {
2692 struct pfioc_table *io = (struct pfioc_table *)addr;
2693
2694 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2695 error = ENODEV;
2696 break;
2697 }
2698 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2699 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2700 break;
2701 }
2702
2703 case DIOCRCLRTSTATS: {
2704 struct pfioc_table *io = (struct pfioc_table *)addr;
2705
2706 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2707 error = ENODEV;
2708 break;
2709 }
2710 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2711 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2712 break;
2713 }
2714
2715 case DIOCRSETTFLAGS: {
2716 struct pfioc_table *io = (struct pfioc_table *)addr;
2717
2718 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2719 error = ENODEV;
2720 break;
2721 }
2722 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2723 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2724 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2725 break;
2726 }
2727
2728 case DIOCRCLRADDRS: {
2729 struct pfioc_table *io = (struct pfioc_table *)addr;
2730
2731 if (io->pfrio_esize != 0) {
2732 error = ENODEV;
2733 break;
2734 }
2735 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2736 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2737 break;
2738 }
2739
2740 case DIOCRADDADDRS: {
2741 struct pfioc_table *io = (struct pfioc_table *)addr;
2742
2743 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2744 error = ENODEV;
2745 break;
2746 }
2747 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2748 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2749 PFR_FLAG_USERIOCTL);
2750 break;
2751 }
2752
2753 case DIOCRDELADDRS: {
2754 struct pfioc_table *io = (struct pfioc_table *)addr;
2755
2756 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2757 error = ENODEV;
2758 break;
2759 }
2760 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2761 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2762 PFR_FLAG_USERIOCTL);
2763 break;
2764 }
2765
2766 case DIOCRSETADDRS: {
2767 struct pfioc_table *io = (struct pfioc_table *)addr;
2768
2769 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2770 error = ENODEV;
2771 break;
2772 }
2773 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2774 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2775 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2776 PFR_FLAG_USERIOCTL, 0);
2777 break;
2778 }
2779
2780 case DIOCRGETADDRS: {
2781 struct pfioc_table *io = (struct pfioc_table *)addr;
2782
2783 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2784 error = ENODEV;
2785 break;
2786 }
2787 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2788 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2789 break;
2790 }
2791
2792 case DIOCRGETASTATS: {
2793 struct pfioc_table *io = (struct pfioc_table *)addr;
2794
2795 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2796 error = ENODEV;
2797 break;
2798 }
2799 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2800 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2801 break;
2802 }
2803
2804 case DIOCRCLRASTATS: {
2805 struct pfioc_table *io = (struct pfioc_table *)addr;
2806
2807 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2808 error = ENODEV;
2809 break;
2810 }
2811 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2812 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2813 PFR_FLAG_USERIOCTL);
2814 break;
2815 }
2816
2817 case DIOCRTSTADDRS: {
2818 struct pfioc_table *io = (struct pfioc_table *)addr;
2819
2820 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2821 error = ENODEV;
2822 break;
2823 }
2824 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2825 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2826 PFR_FLAG_USERIOCTL);
2827 break;
2828 }
2829
2830 case DIOCRINADEFINE: {
2831 struct pfioc_table *io = (struct pfioc_table *)addr;
2832
2833 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2834 error = ENODEV;
2835 break;
2836 }
2837 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2838 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2839 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2840 break;
2841 }
2842
2843 case DIOCOSFPADD: {
2844 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2845 error = pf_osfp_add(io);
2846 break;
2847 }
2848
2849 case DIOCOSFPGET: {
2850 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2851 error = pf_osfp_get(io);
2852 break;
2853 }
2854
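	/*
	 * DIOCXBEGIN: open one inactive ruleset per transaction
	 * element and return the tickets needed to commit or roll
	 * back later.
	 */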
2855 case DIOCXBEGIN: {
2856 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2857 struct pfioc_trans_e *ioe;
2858 struct pfr_table *table;
2859 int i;
2860
2861 if (io->esize != sizeof(*ioe)) {
2862 error = ENODEV;
2863 goto fail;
2864 }
2865 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2866 M_TEMP, M_WAITOK);
2867 table = (struct pfr_table *)malloc(sizeof(*table),
2868 M_TEMP, M_WAITOK);
2869 for (i = 0; i < io->size; i++) {
2870 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2871 free(table, M_TEMP);
2872 free(ioe, M_TEMP);
2873 error = EFAULT;
2874 goto fail;
2875 }
2876 switch (ioe->rs_num) {
2877 #ifdef ALTQ
2878 case PF_RULESET_ALTQ:
2879 if (ioe->anchor[0]) {
2880 free(table, M_TEMP);
2881 free(ioe, M_TEMP);
2882 error = EINVAL;
2883 goto fail;
2884 }
2885 if ((error = pf_begin_altq(&ioe->ticket))) {
2886 free(table, M_TEMP);
2887 free(ioe, M_TEMP);
2888 goto fail;
2889 }
2890 break;
2891 #endif /* ALTQ */
2892 case PF_RULESET_TABLE:
2893 bzero(table, sizeof(*table));
2894 strlcpy(table->pfrt_anchor, ioe->anchor,
2895 sizeof(table->pfrt_anchor));
2896 if ((error = pfr_ina_begin(table,
2897 &ioe->ticket, NULL, 0))) {
2898 free(table, M_TEMP);
2899 free(ioe, M_TEMP);
2900 goto fail;
2901 }
2902 break;
2903 default:
2904 if ((error = pf_begin_rules(&ioe->ticket,
2905 ioe->rs_num, ioe->anchor))) {
2906 free(table, M_TEMP);
2907 free(ioe, M_TEMP);
2908 goto fail;
2909 }
2910 break;
2911 }
2912 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2913 free(table, M_TEMP);
2914 free(ioe, M_TEMP);
2915 error = EFAULT;
2916 goto fail;
2917 }
2918 }
2919 free(table, M_TEMP);
2920 free(ioe, M_TEMP);
2921 break;
2922 }
2923
2924 case DIOCXROLLBACK: {
2925 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2926 struct pfioc_trans_e *ioe;
2927 struct pfr_table *table;
2928 int i;
2929
2930 if (io->esize != sizeof(*ioe)) {
2931 error = ENODEV;
2932 goto fail;
2933 }
2934 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2935 M_TEMP, M_WAITOK);
2936 table = (struct pfr_table *)malloc(sizeof(*table),
2937 M_TEMP, M_WAITOK);
2938 for (i = 0; i < io->size; i++) {
2939 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2940 free(table, M_TEMP);
2941 free(ioe, M_TEMP);
2942 error = EFAULT;
2943 goto fail;
2944 }
2945 switch (ioe->rs_num) {
2946 #ifdef ALTQ
2947 case PF_RULESET_ALTQ:
2948 if (ioe->anchor[0]) {
2949 free(table, M_TEMP);
2950 free(ioe, M_TEMP);
2951 error = EINVAL;
2952 goto fail;
2953 }
2954 if ((error = pf_rollback_altq(ioe->ticket))) {
2955 free(table, M_TEMP);
2956 free(ioe, M_TEMP);
2957 goto fail; /* really bad */
2958 }
2959 break;
2960 #endif /* ALTQ */
2961 case PF_RULESET_TABLE:
2962 bzero(table, sizeof(*table));
2963 strlcpy(table->pfrt_anchor, ioe->anchor,
2964 sizeof(table->pfrt_anchor));
2965 if ((error = pfr_ina_rollback(table,
2966 ioe->ticket, NULL, 0))) {
2967 free(table, M_TEMP);
2968 free(ioe, M_TEMP);
2969 goto fail; /* really bad */
2970 }
2971 break;
2972 default:
2973 if ((error = pf_rollback_rules(ioe->ticket,
2974 ioe->rs_num, ioe->anchor))) {
2975 free(table, M_TEMP);
2976 free(ioe, M_TEMP);
2977 goto fail; /* really bad */
2978 }
2979 break;
2980 }
2981 }
2982 free(table, M_TEMP);
2983 free(ioe, M_TEMP);
2984 break;
2985 }
2986
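	/*
	 * DIOCXCOMMIT: two passes over the transaction array; the
	 * first validates every ticket so that the second, which
	 * swaps the inactive rulesets in, should not fail.
	 */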
2987 case DIOCXCOMMIT: {
2988 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2989 struct pfioc_trans_e *ioe;
2990 struct pfr_table *table;
2991 struct pf_ruleset *rs;
2992 int i;
2993
2994 if (io->esize != sizeof(*ioe)) {
2995 error = ENODEV;
2996 goto fail;
2997 }
2998 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2999 M_TEMP, M_WAITOK);
3000 table = (struct pfr_table *)malloc(sizeof(*table),
3001 M_TEMP, M_WAITOK);
3002 		/* First, make sure everything will succeed. */
3003 for (i = 0; i < io->size; i++) {
3004 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3005 free(table, M_TEMP);
3006 free(ioe, M_TEMP);
3007 error = EFAULT;
3008 goto fail;
3009 }
3010 switch (ioe->rs_num) {
3011 #ifdef ALTQ
3012 case PF_RULESET_ALTQ:
3013 if (ioe->anchor[0]) {
3014 free(table, M_TEMP);
3015 free(ioe, M_TEMP);
3016 error = EINVAL;
3017 goto fail;
3018 }
3019 if (!altqs_inactive_open || ioe->ticket !=
3020 ticket_altqs_inactive) {
3021 free(table, M_TEMP);
3022 free(ioe, M_TEMP);
3023 error = EBUSY;
3024 goto fail;
3025 }
3026 break;
3027 #endif /* ALTQ */
3028 case PF_RULESET_TABLE:
3029 rs = pf_find_ruleset(ioe->anchor);
3030 if (rs == NULL || !rs->topen || ioe->ticket !=
3031 rs->tticket) {
3032 free(table, M_TEMP);
3033 free(ioe, M_TEMP);
3034 error = EBUSY;
3035 goto fail;
3036 }
3037 break;
3038 default:
3039 if (ioe->rs_num < 0 || ioe->rs_num >=
3040 PF_RULESET_MAX) {
3041 free(table, M_TEMP);
3042 free(ioe, M_TEMP);
3043 error = EINVAL;
3044 goto fail;
3045 }
3046 rs = pf_find_ruleset(ioe->anchor);
3047 if (rs == NULL ||
3048 !rs->rules[ioe->rs_num].inactive.open ||
3049 rs->rules[ioe->rs_num].inactive.ticket !=
3050 ioe->ticket) {
3051 free(table, M_TEMP);
3052 free(ioe, M_TEMP);
3053 error = EBUSY;
3054 goto fail;
3055 }
3056 break;
3057 }
3058 }
3059 /* now do the commit - no errors should happen here */
3060 for (i = 0; i < io->size; i++) {
3061 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
3062 free(table, M_TEMP);
3063 free(ioe, M_TEMP);
3064 error = EFAULT;
3065 goto fail;
3066 }
3067 switch (ioe->rs_num) {
3068 #ifdef ALTQ
3069 case PF_RULESET_ALTQ:
3070 if ((error = pf_commit_altq(ioe->ticket))) {
3071 free(table, M_TEMP);
3072 free(ioe, M_TEMP);
3073 goto fail; /* really bad */
3074 }
3075 break;
3076 #endif /* ALTQ */
3077 case PF_RULESET_TABLE:
3078 bzero(table, sizeof(*table));
3079 strlcpy(table->pfrt_anchor, ioe->anchor,
3080 sizeof(table->pfrt_anchor));
3081 if ((error = pfr_ina_commit(table, ioe->ticket,
3082 NULL, NULL, 0))) {
3083 free(table, M_TEMP);
3084 free(ioe, M_TEMP);
3085 goto fail; /* really bad */
3086 }
3087 break;
3088 default:
3089 if ((error = pf_commit_rules(ioe->ticket,
3090 ioe->rs_num, ioe->anchor))) {
3091 free(table, M_TEMP);
3092 free(ioe, M_TEMP);
3093 goto fail; /* really bad */
3094 }
3095 break;
3096 }
3097 }
3098 free(table, M_TEMP);
3099 free(ioe, M_TEMP);
3100 break;
3101 }
3102
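	/*
	 * DIOCGETSRCNODES: dump the source tracking nodes, with
	 * creation/expiry times and connection rates rebased to
	 * seconds from now.
	 */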
3103 case DIOCGETSRCNODES: {
3104 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
3105 struct pf_src_node *n, *p, *pstore;
3106 u_int32_t nr = 0;
3107 int space = psn->psn_len;
3108
3109 if (space == 0) {
3110 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
3111 nr++;
3112 psn->psn_len = sizeof(struct pf_src_node) * nr;
3113 break;
3114 }
3115
3116 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
3117
3118 p = psn->psn_src_nodes;
3119 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3120 int secs = time_second, diff;
3121
3122 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3123 break;
3124
3125 bcopy(n, pstore, sizeof(*pstore));
3126 if (n->rule.ptr != NULL)
3127 pstore->rule.nr = n->rule.ptr->nr;
3128 pstore->creation = secs - pstore->creation;
3129 if (pstore->expire > secs)
3130 pstore->expire -= secs;
3131 else
3132 pstore->expire = 0;
3133
3134 /* adjust the connection rate estimate */
3135 diff = secs - n->conn_rate.last;
3136 if (diff >= n->conn_rate.seconds)
3137 pstore->conn_rate.count = 0;
3138 else
3139 pstore->conn_rate.count -=
3140 n->conn_rate.count * diff /
3141 n->conn_rate.seconds;
3142
3143 error = copyout(pstore, p, sizeof(*p));
3144 if (error) {
3145 free(pstore, M_TEMP);
3146 goto fail;
3147 }
3148 p++;
3149 nr++;
3150 }
3151 psn->psn_len = sizeof(struct pf_src_node) * nr;
3152
3153 free(pstore, M_TEMP);
3154 break;
3155 }
3156
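	/*
	 * DIOCCLRSRCNODES: detach every state from its source nodes
	 * and expire all nodes on the next purge run.
	 */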
3157 case DIOCCLRSRCNODES: {
3158 struct pf_src_node *n;
3159 struct pf_state *state;
3160
3161 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3162 state->src_node = NULL;
3163 state->nat_src_node = NULL;
3164 }
3165 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3166 n->expire = 1;
3167 n->states = 0;
3168 }
3169 pf_purge_expired_src_nodes(1);
3170 pf_status.src_nodes = 0;
3171 break;
3172 }
3173
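	/*
	 * DIOCKILLSRCNODES: like DIOCCLRSRCNODES, but only for nodes
	 * matching the given source/destination filters; the kill
	 * count is returned in psnk_af.
	 */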
3174 case DIOCKILLSRCNODES: {
3175 struct pf_src_node *sn;
3176 struct pf_state *ps;
3177 		struct pfioc_src_node_kill *psnk =
3178 		    (struct pfioc_src_node_kill *)addr;
3179 int killed = 0;
3180
3181 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
3182 			if (PF_MATCHA(psnk->psnk_src.neg,
3183 			    &psnk->psnk_src.addr.v.a.addr,
3184 			    &psnk->psnk_src.addr.v.a.mask,
3185 			    &sn->addr, sn->af) &&
3186 			    PF_MATCHA(psnk->psnk_dst.neg,
3187 			    &psnk->psnk_dst.addr.v.a.addr,
3188 			    &psnk->psnk_dst.addr.v.a.mask,
3189 			    &sn->raddr, sn->af)) {
3190 /* Handle state to src_node linkage */
3191 if (sn->states != 0) {
3192 RB_FOREACH(ps, pf_state_tree_id,
3193 &tree_id) {
3194 if (ps->src_node == sn)
3195 ps->src_node = NULL;
3196 if (ps->nat_src_node == sn)
3197 ps->nat_src_node = NULL;
3198 }
3199 sn->states = 0;
3200 }
3201 sn->expire = 1;
3202 killed++;
3203 }
3204 }
3205
3206 if (killed > 0)
3207 pf_purge_expired_src_nodes(1);
3208
3209 psnk->psnk_af = killed;
3210 break;
3211 }
3212
3213 case DIOCSETHOSTID: {
3214 u_int32_t *hid = (u_int32_t *)addr;
3215
3216 if (*hid == 0)
3217 pf_status.hostid = cprng_fast32();
3218 else
3219 pf_status.hostid = *hid;
3220 break;
3221 }
3222
3223 case DIOCOSFPFLUSH:
3224 pf_osfp_flush();
3225 break;
3226
3227 case DIOCIGETIFACES: {
3228 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3229
3230 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
3231 error = ENODEV;
3232 break;
3233 }
3234 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
3235 &io->pfiio_size);
3236 break;
3237 }
3238
3239 case DIOCSETIFFLAG: {
3240 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3241
3242 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3243 break;
3244 }
3245
3246 case DIOCCLRIFFLAG: {
3247 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3248
3249 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3250 break;
3251 }
3252
3253 case DIOCSETLCK: {
3254 pf_state_lock = *(uint32_t*)addr;
3255 break;
3256 }
3257
3258 default:
3259 error = ENODEV;
3260 break;
3261 }
3262 fail:
3263 splx(s);
3264 if (flags & FWRITE)
3265 rw_exit_write(&pf_consistency_lock);
3266 else
3267 rw_exit_read(&pf_consistency_lock);
3268 return (error);
3269 }
3270
3271 #ifdef __NetBSD__
3272 #ifdef INET
3273 static int
3274 pfil4_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3275 {
3276 int error;
3277
3278 	/*
3279 	 * Ensure that the mbuf is writable beforehand, since the
3280 	 * pf code assumes it.  An IP header (60 bytes) plus a TCP
3281 	 * header (60 bytes) should be enough.
3282 	 * XXX inefficient
3283 	 */
3284 error = m_makewritable(mp, 0, 60 + 60, M_DONTWAIT);
3285 if (error) {
3286 m_freem(*mp);
3287 *mp = NULL;
3288 return error;
3289 }
3290
3291 /*
3292 * If the packet is out-bound, we can't delay checksums
3293 * here. For in-bound, the checksum has already been
3294 * validated.
3295 */
3296 if (dir == PFIL_OUT) {
3297 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
3298 in_undefer_cksum_tcpudp(*mp);
3299 (*mp)->m_pkthdr.csum_flags &=
3300 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
3301 }
3302 }
3303
3304 if (pf_test(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3305 != PF_PASS) {
3306 m_freem(*mp);
3307 *mp = NULL;
3308 return EHOSTUNREACH;
3309 }
3310
3311 	/*
3312 	 * We're not compatible with fast-forward, so prevent the
3313 	 * inbound packet from taking that path.
3314 	 */
3315 if (dir == PFIL_IN && *mp) {
3316 (*mp)->m_flags &= ~M_CANFASTFWD;
3317 }
3318
3319 return (0);
3320 }
3321 #endif /* INET */
3322
3323 #ifdef INET6
3324 static int
3325 pfil6_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
3326 {
3327 int error;
3328
3329 	/*
3330 	 * Ensure that the mbuf is writable beforehand, since the
3331 	 * pf code assumes it.  The whole packet is made writable here.
3332 	 * XXX inefficient
3333 	 */
3334 error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
3335 if (error) {
3336 m_freem(*mp);
3337 *mp = NULL;
3338 return error;
3339 }
3340
3341 /*
3342 * If the packet is out-bound, we can't delay checksums
3343 * here. For in-bound, the checksum has already been
3344 * validated.
3345 */
3346 if (dir == PFIL_OUT) {
3347 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3348 in6_undefer_cksum_tcpudp(*mp);
3349 (*mp)->m_pkthdr.csum_flags &=
3350 ~(M_CSUM_TCPv6|M_CSUM_UDPv6);
3351 }
3352 }
3353
3354 if (pf_test6(dir == PFIL_OUT ? PF_OUT : PF_IN, ifp, mp, NULL)
3355 != PF_PASS) {
3356 m_freem(*mp);
3357 *mp = NULL;
3358 return EHOSTUNREACH;
3359 	}
3360 	return (0);
3361 }
3362 #endif /* INET6 */
3363
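/*
 * Register the pf wrappers with the pfil(9) heads for AF_INET
 * and, when configured, AF_INET6.
 */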
3364 static int
3365 pf_pfil_attach(void)
3366 {
3367 pfil_head_t *ph_inet;
3368 #ifdef INET6
3369 pfil_head_t *ph_inet6;
3370 #endif /* INET6 */
3371 int error;
3372
3373 if (pf_pfil_attached)
3374 return (EBUSY);
3375
3376 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
3377 if (ph_inet)
3378 error = pfil_add_hook((void *)pfil4_wrapper, NULL,
3379 PFIL_IN|PFIL_OUT, ph_inet);
3380 else
3381 error = ENOENT;
3382 if (error)
3383 return (error);
3384
3385 #ifdef INET6
3386 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
3387 if (ph_inet6)
3388 error = pfil_add_hook((void *)pfil6_wrapper, NULL,
3389 PFIL_IN|PFIL_OUT, ph_inet6);
3390 else
3391 error = ENOENT;
3392 if (error)
3393 goto bad;
3394 #endif /* INET6 */
3395
3396 pf_pfil_attached = 1;
3397
3398 return (0);
3399
3400 #ifdef INET6
3401 bad:
3402 pfil_remove_hook(pfil4_wrapper, NULL, PFIL_IN|PFIL_OUT, ph_inet);
3403 #endif /* INET6 */
3404
3405 return (error);
3406 }
3407
3408 static int
3409 pf_pfil_detach(void)
3410 {
3411 pfil_head_t *ph_inet;
3412 #ifdef INET6
3413 pfil_head_t *ph_inet6;
3414 #endif /* INET6 */
3415
3416 if (pf_pfil_attached == 0)
3417 return (EBUSY);
3418
3419 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
3420 if (ph_inet)
3421 pfil_remove_hook((void *)pfil4_wrapper, NULL,
3422 PFIL_IN|PFIL_OUT, ph_inet);
3423 #ifdef INET6
3424 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
3425 if (ph_inet6)
3426 pfil_remove_hook((void *)pfil6_wrapper, NULL,
3427 PFIL_IN|PFIL_OUT, ph_inet6);
3428 #endif /* INET6 */
3429 pf_pfil_attached = 0;
3430
3431 return (0);
3432 }
3433 #endif /* __NetBSD__ */
3434
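/*
 * Module glue: attach the pf character device plus the pflog
 * interface on load, and refuse to unload while pf is running.
 */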
3435 #if defined(__NetBSD__)
3436 MODULE(MODULE_CLASS_DRIVER, pf, "bpf");
3437
3438 static int
3439 pf_modcmd(modcmd_t cmd, void *opaque)
3440 {
3441 #ifdef _MODULE
3442 extern void pflogattach(int);
3443 extern void pflogdetach(void);
3444
3445 devmajor_t cmajor = NODEVMAJOR, bmajor = NODEVMAJOR;
3446 int err;
3447
3448 switch (cmd) {
3449 case MODULE_CMD_INIT:
3450 err = devsw_attach("pf", NULL, &bmajor, &pf_cdevsw, &cmajor);
3451 if (err)
3452 return err;
3453 pfattach(1);
3454 pflogattach(1);
3455 return 0;
3456 case MODULE_CMD_FINI:
3457 if (pf_status.running) {
3458 return EBUSY;
3459 } else {
3460 pfdetach();
3461 pflogdetach();
3462 devsw_detach(NULL, &pf_cdevsw);
3463 return 0;
3464 }
3465 default:
3466 return ENOTTY;
3467 }
3468 #else
3469 if (cmd == MODULE_CMD_INIT)
3470 return 0;
3471 return ENOTTY;
3472 #endif
3473 }
3474 #endif
3475