1 /* $NetBSD: ip_fil_netbsd.c,v 1.39 2024/06/09 20:30:36 mrg Exp $ */
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if !defined(lint)
9 #if defined(__NetBSD__)
10 #include <sys/cdefs.h>
11 __KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.39 2024/06/09 20:30:36 mrg Exp $");
12 #else
13 static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed";
14 static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
15 #endif
16 #endif
17
18 #if defined(KERNEL) || defined(_KERNEL)
19 # undef KERNEL
20 # undef _KERNEL
21 # define KERNEL 1
22 # define _KERNEL 1
23 #endif
24 #include <sys/param.h>
25 #if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
26 # if (__NetBSD_Version__ >= 799003000)
27 # ifdef _KERNEL_OPT
28 # include "opt_ipsec.h"
29 # endif
30 # else
31 # include "opt_ipsec.h"
32 # endif
33 #endif
34 #include <sys/errno.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/ioctl.h>
38 #include <sys/time.h>
39 #include <sys/systm.h>
40 #include <sys/select.h>
41 #if (NetBSD > 199609)
42 # include <sys/dirent.h>
43 #else
44 # include <sys/dir.h>
45 #endif
46 #if (__NetBSD_Version__ >= 599005900)
47 # include <sys/cprng.h>
48 #endif
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/poll.h>
53 #if (__NetBSD_Version__ >= 399002000)
54 # include <sys/kauth.h>
55 #endif
56 #if (__NetBSD_Version__ >= 799003000)
57 #include <sys/module.h>
58 #include <sys/mutex.h>
59 #endif
60 #if defined(__NetBSD__)
61 #include <netinet/in_offload.h>
62 #endif
63
64 #include <net/if.h>
65 #include <net/route.h>
66 #include <netinet/in.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/tcp.h>
72 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
73 # include <netinet/tcp_timer.h>
74 # include <netinet/tcp_var.h>
75 #endif
76 #include <netinet/udp.h>
77 #include <netinet/ip_icmp.h>
78 #include "netinet/ip_compat.h"
79 #ifdef USE_INET6
80 # include <netinet/icmp6.h>
81 # if (__NetBSD_Version__ >= 106000000)
82 # include <netinet6/nd6.h>
83 # endif
84 # if __NetBSD_Version__ >= 499001100
85 # include <netinet6/scope6_var.h>
86 # include <netinet6/in6_offload.h>
87 # endif
88 #endif
89 #include "netinet/ip_fil.h"
90 #include "netinet/ip_nat.h"
91 #include "netinet/ip_frag.h"
92 #include "netinet/ip_state.h"
93 #include "netinet/ip_proxy.h"
94 #include "netinet/ip_auth.h"
95 #include "netinet/ip_sync.h"
96 #include "netinet/ip_lookup.h"
97 #include "netinet/ip_dstlist.h"
98 #ifdef IPFILTER_SCAN
99 #include "netinet/ip_scan.h"
100 #endif
101 #include <sys/md5.h>
102 #include <sys/kernel.h>
103 #include <sys/conf.h>
104 #ifdef INET
105 extern int ip_optcopy (struct ip *, struct ip *);
106 #endif
107
108 #ifdef IPFILTER_M_IPFILTER
109 MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
110 #endif
111
112 #if __NetBSD_Version__ >= 105009999
113 # define csuminfo csum_flags
114 #endif
115
116 #if __NetBSD_Version__ < 200000000
117 extern struct protosw inetsw[];
118 #endif
119
120 #if (__NetBSD_Version__ >= 599002000)
121 static kauth_listener_t ipf_listener;
122 #endif
123
124 #if (__NetBSD_Version__ < 399001400)
125 extern int ip6_getpmtu (struct route_in6 *, struct route_in6 *,
126 struct ifnet *, struct in6_addr *, u_long *,
127 int *);
128 #endif
129 #if (NetBSD >= 199511)
130 static int ipfopen(dev_t dev, int flags, int devtype, PROC_T *p);
131 static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
132 #else
133 # if (__NetBSD_Version__ >= 399001400)
134 static int ipfopen(dev_t dev, int flags, struct lwp *);
135 static int ipfclose(dev_t dev, int flags, struct lwp *);
136 # else
137 static int ipfopen(dev_t dev, int flags);
138 static int ipfclose(dev_t dev, int flags);
139 # endif /* __NetBSD_Version__ >= 399001400 */
140 #endif
141 static int ipfread(dev_t, struct uio *, int ioflag);
142 static int ipfwrite(dev_t, struct uio *, int ioflag);
143 static int ipfpoll(dev_t, int events, PROC_T *);
144 static void ipf_timer_func(void *ptr);
145
146 const struct cdevsw ipl_cdevsw = {
147 .d_open = ipfopen,
148 .d_close = ipfclose,
149 .d_read = ipfread,
150 .d_write = ipfwrite,
151 .d_ioctl = ipfioctl,
152 .d_stop = nostop,
153 .d_tty = notty,
154 .d_poll = ipfpoll,
155 .d_mmap = nommap,
156 #if (__NetBSD_Version__ >= 200000000)
157 .d_kqfilter = nokqfilter,
158 #endif
159 .d_discard = nodiscard,
160 #ifdef D_OTHER
161 .d_flag = D_OTHER
162 # if __NetBSD_Version__ >= 799003200
163 | D_MPSAFE
164 # endif
165 #else
166 .d_flag = 0
167 #endif
168 };
169 #if (__NetBSD_Version__ >= 799003000)
170 kmutex_t ipf_ref_mutex;
171 int ipf_active;
172 #endif
173
174 ipf_main_softc_t ipfmain;
175
176 static u_short ipid = 0;
177 static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
178 static int ipf_send_ip(fr_info_t *, mb_t *);
179 #ifdef USE_INET6
180 static int ipf_fastroute6(struct mbuf *, struct mbuf **,
181 fr_info_t *, frdest_t *);
182 #endif
183
184 #if defined(NETBSD_PF)
185 # include <net/pfil.h>
186 /*
187 * We provide the ipf_checkp name just to minimize changes later.
188 */
189 int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
190 #endif /* NETBSD_PF */
191
192 #if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
193 # include <net/pfil.h>
194
195 static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int );
196
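/*
 * pfil(9) hook for IPv4: ensure the mbuf chain is writable where the
 * kernel supports it, complete any checksums that were deferred to
 * hardware on the outbound path, then hand the packet to ipf_check().
 */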
197 static int
198 ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
199 {
200 struct ip *ip;
201 int rv, hlen;
202
203 #if __NetBSD_Version__ >= 200080000
204 /*
205 * ensure that mbufs are writable beforehand
206 * as it's assumed by ipf code.
207 * XXX inefficient
208 */
209 int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);
210
211 if (error) {
212 m_freem(*mp);
213 *mp = NULL;
214 return error;
215 }
216 #endif
217 ip = mtod(*mp, struct ip *);
218 hlen = ip->ip_hl << 2;
219
220 #ifdef INET
221 #if defined(M_CSUM_TCPv4)
222 /*
223 * If the packet is out-bound, we can't delay checksums
224 * here. For in-bound, the checksum has already been
225 * validated.
226 */
227 if (dir == PFIL_OUT) {
228 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
229 in_undefer_cksum_tcpudp(*mp);
230 (*mp)->m_pkthdr.csum_flags &=
231 ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
232 }
233 }
234 #endif /* M_CSUM_TCPv4 */
235 #endif /* INET */
236
237 /*
238 * Note, we don't need to update the checksum, because
239 * it has already been verified.
240 */
241 rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);
242
243 return (rv);
244 }
245
246 # ifdef USE_INET6
247 # include <netinet/ip6.h>
248
249 static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int );
250
251 static int
252 ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
253 {
254 #if defined(INET6)
255 # if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
256 /*
257 * If the packet is out-bound, we can't delay checksums
258 * here. For in-bound, the checksum has already been
259 * validated.
260 */
261 if (dir == PFIL_OUT) {
262 if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
263 # if (__NetBSD_Version__ > 399000600)
264 in6_undefer_cksum_tcpudp(*mp);
265 # endif
266 (*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
267 M_CSUM_UDPv6);
268 }
269 }
270 # endif
271 #endif /* INET6 */
272
273 return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
274 ifp, (dir == PFIL_OUT), mp));
275 }
276 # endif
277
278
279 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
280
281 # if (__NetBSD_Version__ >= 799000400)
282
283 static void ipf_pfilsync(void *, unsigned long, void *);
284
285 static void
286 ipf_pfilsync(void *hdr, unsigned long cmd, void *arg2)
287 {
288 /*
289 	 * The interface pointer is useless at create time (we have nothing to
290 	 * compare it to).  At detach time the interface name is still in the
291 	 * list of active NICs (albeit down, which is no real indicator) and
292 	 * ifunit() on the name will still return the pointer, so it is not
293 	 * much use then, either.
294 */
295 ipf_sync(&ipfmain, NULL);
296 }
297
298 # else
299
300 static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);
301
302 static int
303 ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
304 {
305 ipf_sync(&ipfmain, NULL);
306 return 0;
307 }
308
309 # endif
310 # endif
311
312 #endif /* __NetBSD_Version__ >= 105110000 */
313
314
315 #if defined(IPFILTER_LKM)
316 int
317 ipf_identify(s)
318 char *s;
319 {
320 if (strcmp(s, "ipl") == 0)
321 return 1;
322 return 0;
323 }
324 #endif /* IPFILTER_LKM */
325
326 #if (__NetBSD_Version__ >= 599002000)
327 static int
328 ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
329 void *arg0, void *arg1, void *arg2, void *arg3)
330 {
331 int result;
332 enum kauth_network_req req;
333
334 result = KAUTH_RESULT_DEFER;
335 req = (enum kauth_network_req)(uintptr_t)arg0;
336
337 if (action != KAUTH_NETWORK_FIREWALL)
338 return result;
339
340 	/* These must have come from device context. */
341 if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
342 (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
343 result = KAUTH_RESULT_ALLOW;
344
345 return result;
346 }
347 #endif
348
349 /*
350 * Try to detect the case when compiling for NetBSD with pseudo-device
351 */
352 void
353 ipfilterattach(int count)
354 {
355
356 #if (__NetBSD_Version__ >= 799003000)
357 return;
358 #else
359 #if (__NetBSD_Version__ >= 599002000)
360 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
361 ipf_listener_cb, NULL);
362 #endif
363
364 if (ipf_load_all() == 0)
365 (void) ipf_create_all(&ipfmain);
366 #endif
367 }
368
369
370 int
371 ipfattach(ipf_main_softc_t *softc)
372 {
373 SPL_INT(s);
374 #if (__NetBSD_Version__ >= 499005500)
375 int i;
376 #endif
377 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
378 int error = 0;
379 # if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
380 pfil_head_t *ph_inet;
381 # ifdef USE_INET6
382 pfil_head_t *ph_inet6;
383 # endif
384 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
385 pfil_head_t *ph_ifsync;
386 # endif
387 # endif
388 #endif
389
390 SPL_NET(s);
391 if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
392 printf("IP Filter: already initialized\n");
393 SPL_X(s);
394 IPFERROR(130017);
395 return EBUSY;
396 }
397
398 if (ipf_init_all(softc) < 0) {
399 SPL_X(s);
400 IPFERROR(130015);
401 return EIO;
402 }
403
404 #ifdef NETBSD_PF
405 # if (__NetBSD_Version__ >= 104200000)
406 # if __NetBSD_Version__ >= 105110000
407 ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
408 # ifdef USE_INET6
409 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
410 # endif
411 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
412 ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
413 # endif
414
415 if (ph_inet == NULL
416 # ifdef USE_INET6
417 && ph_inet6 == NULL
418 # endif
419 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
420 && ph_ifsync == NULL
421 # endif
422 ) {
423 SPL_X(s);
424 IPFERROR(130016);
425 return ENODEV;
426 }
427
428 if (ph_inet != NULL)
429 error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
430 PFIL_IN|PFIL_OUT, ph_inet);
431 else
432 error = 0;
433 # else
434 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
435 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
436 # endif
437 if (error) {
438 IPFERROR(130013);
439 goto pfil_error;
440 }
441 # else
442 pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
443 # endif
444
445 # ifdef USE_INET6
446 # if __NetBSD_Version__ >= 105110000
447 if (ph_inet6 != NULL)
448 error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
449 PFIL_IN|PFIL_OUT, ph_inet6);
450 else
451 error = 0;
452 if (error) {
453 pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
454 PFIL_IN|PFIL_OUT, ph_inet6);
455 ipfmain.ipf_interror = 130014;
456 goto pfil_error;
457 }
458 # else
459 error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
460 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
461 if (error) {
462 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
463 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
464 IPFERROR(130014);
465 goto pfil_error;
466 }
467 # endif
468 # endif
469
470 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
471 if (ph_ifsync != NULL)
472 #if (__NetBSD_Version__ >= 799000400)
473 (void) pfil_add_ihook((void *)ipf_pfilsync, NULL,
474 PFIL_IFNET, ph_ifsync);
475 #else
476 (void) pfil_add_hook((void *)ipf_pfilsync, NULL,
477 PFIL_IFNET, ph_ifsync);
478 #endif
479 # endif
480 #endif
481
482 #if (__NetBSD_Version__ >= 499005500)
483 for (i = 0; i < IPL_LOGSIZE; i++)
484 selinit(&ipfmain.ipf_selwait[i]);
485 #else
486 bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
487 #endif
488 ipf_savep = ipf_checkp;
489 ipf_checkp = ipf_check;
490
491 #ifdef INET
492 if (softc->ipf_control_forwarding & 1)
493 ipforwarding = 1;
494 #endif
495
496 ipid = 0;
497
498 SPL_X(s);
499
500 #if (__NetBSD_Version__ >= 104010000)
501 # if (__NetBSD_Version__ >= 499002000)
502 callout_init(&softc->ipf_slow_ch, 0);
503 # else
504 callout_init(&softc->ipf_slow_ch);
505 # endif
506 callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
507 ipf_timer_func, softc);
508 #else
509 timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
510 #endif
511
512 return 0;
513
514 #if __NetBSD_Version__ >= 105110000
515 pfil_error:
516 SPL_X(s);
517 ipf_fini_all(softc);
518 return error;
519 #endif
520 }
521
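/*
 * Periodic housekeeping: run ipf_slowtimer() while the filter is
 * running and re-arm the callout/timeout so that it keeps firing.
 */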
522 static void
523 ipf_timer_func(void *ptr)
524 {
525 ipf_main_softc_t *softc = ptr;
526 SPL_INT(s);
527
528 SPL_NET(s);
529 READ_ENTER(&softc->ipf_global);
530
531 if (softc->ipf_running > 0)
532 ipf_slowtimer(softc);
533
534 if (softc->ipf_running == -1 || softc->ipf_running == 1) {
535 #if NETBSD_GE_REV(104240000)
536 callout_reset(&softc->ipf_slow_ch, hz / 2,
537 ipf_timer_func, softc);
538 #else
539 timeout(ipf_timer_func, softc,
540 (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
541 #endif
542 }
543 RWLOCK_EXIT(&softc->ipf_global);
544 SPL_X(s);
545 }
546
547
548 /*
549 * Disable the filter by removing the hooks from the IP input/output
550 * stream.
551 */
552 int
553 ipfdetach(ipf_main_softc_t *softc)
554 {
555 SPL_INT(s);
556 #if (__NetBSD_Version__ >= 499005500)
557 int i;
558 #endif
559 #if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
560 int error = 0;
561 # if __NetBSD_Version__ >= 105150000
562 pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
563 # ifdef USE_INET6
564 pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
565 # endif
566 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
567 struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
568 # endif
569 # endif
570 #endif
571
572 SPL_NET(s);
573
574 #if (__NetBSD_Version__ >= 104010000)
575 if (softc->ipf_running > 0)
576 callout_stop(&softc->ipf_slow_ch);
577 #else
578 untimeout(ipf_slowtimer, NULL);
579 #endif /* NetBSD */
580
581 ipf_checkp = ipf_savep;
582 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
583 (void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);
584
585 #ifdef INET
586 if (softc->ipf_control_forwarding & 2)
587 ipforwarding = 0;
588 #endif
589
590 #ifdef NETBSD_PF
591 # if (__NetBSD_Version__ >= 104200000)
592 # if __NetBSD_Version__ >= 105110000
593 # if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
594 # if __NetBSD_Version__ >= 799000400
595 (void) pfil_remove_ihook((void *)ipf_pfilsync, NULL,
596 PFIL_IFNET, ph_ifsync);
597 # else
598 (void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
599 PFIL_IFNET, ph_ifsync);
600 # endif
601 # endif
602
603 if (ph_inet != NULL)
604 error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
605 PFIL_IN|PFIL_OUT, ph_inet);
606 else
607 error = 0;
608 # else
609 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
610 &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
611 # endif
612 if (error) {
613 SPL_X(s);
614 IPFERROR(130011);
615 return error;
616 }
617 # else
618 pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
619 # endif
620 # ifdef USE_INET6
621 # if __NetBSD_Version__ >= 105110000
622 if (ph_inet6 != NULL)
623 error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
624 PFIL_IN|PFIL_OUT, ph_inet6);
625 else
626 error = 0;
627 # else
628 error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
629 &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
630 # endif
631 if (error) {
632 SPL_X(s);
633 IPFERROR(130012);
634 return error;
635 }
636 # endif
637 #endif
638 SPL_X(s);
639
640 #if (__NetBSD_Version__ >= 499005500)
641 for (i = 0; i < IPL_LOGSIZE; i++)
642 seldestroy(&ipfmain.ipf_selwait[i]);
643 #endif
644
645 ipf_fini_all(softc);
646
647 return 0;
648 }
649
650
651 /*
652 * Filter ioctl interface.
653 */
654 int
655 ipfioctl(dev_t dev, u_long cmd,
656 #if (__NetBSD_Version__ >= 499001000)
657 void *data,
658 #else
659 caddr_t data,
660 #endif
661 int mode
662 #if (NetBSD >= 199511)
663 # if (__NetBSD_Version__ >= 399001400)
664 , struct lwp *p
665 # if (__NetBSD_Version__ >= 399002000)
666 # define UID(l) kauth_cred_getuid((l)->l_cred)
667 # else
668 # define UID(l) ((l)->l_proc->p_cred->p_ruid)
669 # endif
670 # else
671 , struct proc *p
672 # define UID(p) ((p)->p_cred->p_ruid)
673 # endif
674 #endif
675 )
676 {
677 int error = 0, unit = 0;
678 SPL_INT(s);
679
680 #if (__NetBSD_Version__ >= 399002000)
681 if ((mode & FWRITE) &&
682 kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
683 KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
684 NULL, NULL)) {
685 ipfmain.ipf_interror = 130005;
686 return EPERM;
687 }
688 #else
689 if ((securelevel >= 2) && (mode & FWRITE)) {
690 ipfmain.ipf_interror = 130001;
691 return EPERM;
692 }
693 #endif
694
695 unit = GET_MINOR(dev);
696 if ((IPL_LOGMAX < unit) || (unit < 0)) {
697 ipfmain.ipf_interror = 130002;
698 return ENXIO;
699 }
700
701 if (ipfmain.ipf_running <= 0) {
702 if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
703 ipfmain.ipf_interror = 130003;
704 return EIO;
705 }
706 if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
707 cmd != SIOCIPFSET && cmd != SIOCFRENB &&
708 cmd != SIOCGETFS && cmd != SIOCGETFF &&
709 cmd != SIOCIPFINTERROR) {
710 ipfmain.ipf_interror = 130004;
711 return EIO;
712 }
713 }
714
715 SPL_NET(s);
716
717 error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
718 if (error != -1) {
719 SPL_X(s);
720 return error;
721 }
722
723 SPL_X(s);
724 return error;
725 }
726
727
728 /*
729 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
730 * requires a large amount of setting up and isn't any more efficient.
731 */
732 int
733 ipf_send_reset(fr_info_t *fin)
734 {
735 struct tcphdr *tcp, *tcp2;
736 int tlen = 0, hlen;
737 struct mbuf *m;
738 #ifdef USE_INET6
739 ip6_t *ip6;
740 #endif
741 ip_t *ip;
742
743 tcp = fin->fin_dp;
744 if (tcp->th_flags & TH_RST)
745 return -1; /* feedback loop */
746
747 if (ipf_checkl4sum(fin) == -1)
748 return -1;
749
750 tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
751 ((tcp->th_flags & TH_SYN) ? 1 : 0) +
752 ((tcp->th_flags & TH_FIN) ? 1 : 0);
753
754 #ifdef USE_INET6
755 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t);
756 #else
757 hlen = sizeof(ip_t);
758 #endif
759 #ifdef MGETHDR
760 MGETHDR(m, M_DONTWAIT, MT_HEADER);
761 #else
762 MGET(m, M_DONTWAIT, MT_HEADER);
763 #endif
764 if (m == NULL)
765 return -1;
766 if (sizeof(*tcp2) + hlen > MHLEN) {
767 MCLGET(m, M_DONTWAIT);
768 if (m == NULL)
769 return -1;
770 if ((m->m_flags & M_EXT) == 0) {
771 FREE_MB_T(m);
772 return -1;
773 }
774 }
775
776 m->m_len = sizeof(*tcp2) + hlen;
777 m->m_data += max_linkhdr;
778 m->m_pkthdr.len = m->m_len;
779 m_reset_rcvif(m);
780 ip = mtod(m, struct ip *);
781 bzero((char *)ip, hlen);
782 #ifdef USE_INET6
783 ip6 = (ip6_t *)ip;
784 #endif
785 bzero((char *)ip, sizeof(*tcp2) + hlen);
786 tcp2 = (struct tcphdr *)((char *)ip + hlen);
787 tcp2->th_sport = tcp->th_dport;
788 tcp2->th_dport = tcp->th_sport;
789
790 if (tcp->th_flags & TH_ACK) {
791 tcp2->th_seq = tcp->th_ack;
792 tcp2->th_flags = TH_RST;
793 tcp2->th_ack = 0;
794 } else {
795 tcp2->th_seq = 0;
796 tcp2->th_ack = ntohl(tcp->th_seq);
797 tcp2->th_ack += tlen;
798 tcp2->th_ack = htonl(tcp2->th_ack);
799 tcp2->th_flags = TH_RST|TH_ACK;
800 }
801 tcp2->th_x2 = 0;
802 TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
803 tcp2->th_win = tcp->th_win;
804 tcp2->th_sum = 0;
805 tcp2->th_urp = 0;
806
807 #ifdef USE_INET6
808 if (fin->fin_v == 6) {
809 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
810 ip6->ip6_plen = htons(sizeof(struct tcphdr));
811 ip6->ip6_nxt = IPPROTO_TCP;
812 ip6->ip6_hlim = 0;
813 ip6->ip6_src = fin->fin_dst6.in6;
814 ip6->ip6_dst = fin->fin_src6.in6;
815 tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
816 sizeof(*ip6), sizeof(*tcp2));
817 return ipf_send_ip(fin, m);
818 }
819 #endif
820 #ifdef INET
821 ip->ip_p = IPPROTO_TCP;
822 ip->ip_len = htons(sizeof(struct tcphdr));
823 ip->ip_src.s_addr = fin->fin_daddr;
824 ip->ip_dst.s_addr = fin->fin_saddr;
825 tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
826 ip->ip_len = hlen + sizeof(*tcp2);
827 return ipf_send_ip(fin, m);
828 #else
829 return 0;
830 #endif
831 }
832
833
834 /*
835 * Expects ip_len to be in host byte order when called.
836 */
837 static int
838 ipf_send_ip(fr_info_t *fin, mb_t *m)
839 {
840 fr_info_t fnew;
841 #ifdef INET
842 ip_t *oip;
843 #endif
844 ip_t *ip;
845 int hlen;
846
847 ip = mtod(m, ip_t *);
848 bzero((char *)&fnew, sizeof(fnew));
849 fnew.fin_main_soft = fin->fin_main_soft;
850
851 IP_V_A(ip, fin->fin_v);
852 switch (fin->fin_v)
853 {
854 #ifdef INET
855 case 4 :
856 oip = fin->fin_ip;
857 hlen = sizeof(*oip);
858 fnew.fin_v = 4;
859 fnew.fin_p = ip->ip_p;
860 fnew.fin_plen = ntohs(ip->ip_len);
861 HTONS(ip->ip_len);
862 IP_HL_A(ip, sizeof(*oip) >> 2);
863 ip->ip_tos = oip->ip_tos;
864 ip->ip_id = ipf_nextipid(fin);
865 ip->ip_off = htons(ip_mtudisc ? IP_DF : 0);
866 ip->ip_ttl = ip_defttl;
867 ip->ip_sum = 0;
868 break;
869 #endif
870 #ifdef USE_INET6
871 case 6 :
872 {
873 ip6_t *ip6 = (ip6_t *)ip;
874
875 ip6->ip6_vfc = 0x60;
876 ip6->ip6_hlim = IPDEFTTL;
877
878 hlen = sizeof(*ip6);
879 fnew.fin_p = ip6->ip6_nxt;
880 fnew.fin_v = 6;
881 fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
882 break;
883 }
884 #endif
885 default :
886 return EINVAL;
887 }
888 #ifdef KAME_IPSEC
889 m_reset_rcvif(m);
890 #endif
891
892 fnew.fin_ifp = fin->fin_ifp;
893 fnew.fin_flx = FI_NOCKSUM;
894 fnew.fin_m = m;
895 fnew.fin_ip = ip;
896 fnew.fin_mp = &m;
897 fnew.fin_hlen = hlen;
898 fnew.fin_dp = (char *)ip + hlen;
899 (void) ipf_makefrip(hlen, ip, &fnew);
900
901 return ipf_fastroute(m, &m, &fnew, NULL);
902 }
903
904
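/*
 * Build and send an ICMP (or ICMPv6) error in response to the packet
 * described by fin: allocate a new mbuf, copy in the offending IP
 * header plus a bounded amount of payload and pass it to ipf_send_ip().
 */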
905 int
906 ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
907 {
908 int err, hlen, xtra, iclen, ohlen, avail;
909 struct in_addr dst4;
910 struct icmp *icmp;
911 struct mbuf *m;
912 i6addr_t dst6;
913 void *ifp;
914 #ifdef USE_INET6
915 int code;
916 ip6_t *ip6;
917 #endif
918 ip_t *ip, *ip2;
919
920 if ((type < 0) || (type > ICMP_MAXTYPE))
921 return -1;
922
923 #ifdef USE_INET6
924 code = fin->fin_icode;
925 if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
926 return -1;
927 #endif
928
929 if (ipf_checkl4sum(fin) == -1)
930 return -1;
931 #ifdef MGETHDR
932 MGETHDR(m, M_DONTWAIT, MT_HEADER);
933 #else
934 MGET(m, M_DONTWAIT, MT_HEADER);
935 #endif
936 if (m == NULL)
937 return -1;
938 avail = MHLEN;
939
940 xtra = 0;
941 hlen = 0;
942 ohlen = 0;
943 dst4.s_addr = 0;
944 ifp = fin->fin_ifp;
945 if (fin->fin_v == 4) {
946 if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
947 switch (ntohs(fin->fin_data[0]) >> 8)
948 {
949 case ICMP_ECHO :
950 case ICMP_TSTAMP :
951 case ICMP_IREQ :
952 case ICMP_MASKREQ :
953 break;
954 default :
955 FREE_MB_T(m);
956 return 0;
957 }
958
959 if (dst == 0) {
960 if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
961 &dst6, NULL) == -1) {
962 FREE_MB_T(m);
963 return -1;
964 }
965 dst4 = dst6.in4;
966 } else
967 dst4.s_addr = fin->fin_daddr;
968
969 hlen = sizeof(ip_t);
970 ohlen = fin->fin_hlen;
971 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
972 if (fin->fin_hlen < fin->fin_plen)
973 xtra = MIN(fin->fin_dlen, 8);
974 else
975 xtra = 0;
976 }
977
978 #ifdef USE_INET6
979 else if (fin->fin_v == 6) {
980 hlen = sizeof(ip6_t);
981 ohlen = sizeof(ip6_t);
982 iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
983 type = icmptoicmp6types[type];
984 if (type == ICMP6_DST_UNREACH)
985 code = icmptoicmp6unreach[code];
986
987 if (iclen + max_linkhdr + fin->fin_plen > avail) {
988 MCLGET(m, M_DONTWAIT);
989 if (m == NULL)
990 return -1;
991 if ((m->m_flags & M_EXT) == 0) {
992 FREE_MB_T(m);
993 return -1;
994 }
995 avail = MCLBYTES;
996 }
997 xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
998 xtra = MIN(xtra, IPV6_MMTU - iclen);
999 if (dst == 0 && !IN6_IS_ADDR_LINKLOCAL(&fin->fin_dst6.in6)) {
1000 if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
1001 &dst6, NULL) == -1) {
1002 FREE_MB_T(m);
1003 return -1;
1004 }
1005 } else
1006 dst6 = fin->fin_dst6;
1007 }
1008 #endif
1009 else {
1010 FREE_MB_T(m);
1011 return -1;
1012 }
1013
1014 avail -= (max_linkhdr + iclen);
1015 if (avail < 0) {
1016 FREE_MB_T(m);
1017 return -1;
1018 }
1019 if (xtra > avail)
1020 xtra = avail;
1021 iclen += xtra;
1022 m->m_data += max_linkhdr;
1023 m_reset_rcvif(m);
1024 m->m_pkthdr.len = iclen;
1025 m->m_len = iclen;
1026 ip = mtod(m, ip_t *);
1027 icmp = (struct icmp *)((char *)ip + hlen);
1028 ip2 = (ip_t *)&icmp->icmp_ip;
1029
1030 icmp->icmp_type = type;
1031 icmp->icmp_code = fin->fin_icode;
1032 icmp->icmp_cksum = 0;
1033 #ifdef icmp_nextmtu
1034 if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
1035 if (fin->fin_mtu != 0) {
1036 icmp->icmp_nextmtu = htons(fin->fin_mtu);
1037
1038 } else if (ifp != NULL) {
1039 icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));
1040
1041 } else { /* make up a number... */
1042 icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
1043 }
1044 }
1045 #endif
1046
1047 bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);
1048
1049 #if defined(M_CSUM_IPv4)
1050 /*
1051 * Clear any in-bound checksum flags for this packet.
1052 */
1053 m->m_pkthdr.csuminfo = 0;
1054 #endif /* __NetBSD__ && M_CSUM_IPv4 */
1055
1056 #ifdef USE_INET6
1057 ip6 = (ip6_t *)ip;
1058 if (fin->fin_v == 6) {
1059 ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
1060 ip6->ip6_plen = htons(iclen - hlen);
1061 ip6->ip6_nxt = IPPROTO_ICMPV6;
1062 ip6->ip6_hlim = 0;
1063 ip6->ip6_src = dst6.in6;
1064 ip6->ip6_dst = fin->fin_src6.in6;
1065 if (xtra > 0)
1066 bcopy((char *)fin->fin_ip + ohlen,
1067 (char *)&icmp->icmp_ip + ohlen, xtra);
1068 icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
1069 sizeof(*ip6), iclen - hlen);
1070 } else
1071 #endif
1072 {
1073 ip->ip_p = IPPROTO_ICMP;
1074 ip->ip_src.s_addr = dst4.s_addr;
1075 ip->ip_dst.s_addr = fin->fin_saddr;
1076
1077 if (xtra > 0)
1078 bcopy((char *)fin->fin_ip + ohlen,
1079 (char *)&icmp->icmp_ip + ohlen, xtra);
1080 icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
1081 sizeof(*icmp) + 8);
1082 ip->ip_len = iclen;
1083 ip->ip_p = IPPROTO_ICMP;
1084 }
1085 err = ipf_send_ip(fin, m);
1086 return err;
1087 }
1088
1089
1090 /*
1091 * m0 - pointer to mbuf where the IP packet starts
1092 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
1093 */
1094 int
1095 ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
1096 {
1097 register struct ip *ip, *mhip;
1098 register struct mbuf *m = *mpp;
1099 register struct route *ro;
1100 int len, off, error = 0, hlen, code;
1101 struct ifnet *ifp, *sifp;
1102 ipf_main_softc_t *softc;
1103 #if __NetBSD_Version__ >= 499001100
1104 union {
1105 struct sockaddr dst;
1106 struct sockaddr_in dst4;
1107 } u;
1108 #else
1109 struct sockaddr_in *dst4;
1110 #endif
1111 struct sockaddr *dst;
1112 u_short ip_off, ip_len;
1113 struct route iproute;
1114 struct rtentry *rt;
1115 frdest_t node;
1116 frentry_t *fr;
1117
1118 if (fin->fin_v == 6) {
1119 #ifdef USE_INET6
1120 error = ipf_fastroute6(m0, mpp, fin, fdp);
1121 #else
1122 error = EPROTONOSUPPORT;
1123 #endif
1124 if ((error != 0) && (*mpp != NULL))
1125 FREE_MB_T(*mpp);
1126 return error;
1127 }
1128 #ifndef INET
1129 FREE_MB_T(*mpp);
1130 return EPROTONOSUPPORT;
1131 #else
1132
1133 hlen = fin->fin_hlen;
1134 ip = mtod(m0, struct ip *);
1135 softc = fin->fin_main_soft;
1136 rt = NULL;
1137 ifp = NULL;
1138
1139 # if defined(M_CSUM_IPv4)
1140 /*
1141 * Clear any in-bound checksum flags for this packet.
1142 */
1143 m0->m_pkthdr.csuminfo = 0;
1144 # endif /* __NetBSD__ && M_CSUM_IPv4 */
1145
1146 /*
1147 * Route packet.
1148 */
1149 ro = &iproute;
1150 memset(ro, 0, sizeof(*ro));
1151 fr = fin->fin_fr;
1152
1153 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) &&
1154 (fdp->fd_type == FRD_DSTLIST)) {
1155 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0)
1156 fdp = &node;
1157 }
1158 if (fdp != NULL)
1159 ifp = fdp->fd_ptr;
1160 else
1161 ifp = fin->fin_ifp;
1162
1163 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) {
1164 error = -2;
1165 goto bad;
1166 }
1167
1168 # if __NetBSD_Version__ >= 499001100
1169 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1170 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0);
1171 else
1172 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
1173 dst = &u.dst;
1174 rtcache_setdst(ro, dst);
1175 rt = rtcache_init(ro);
1176 # else
1177 dst4 = (struct sockaddr_in *)&ro->ro_dst;
1178 dst = (struct sockaddr *)dst4;
1179 dst4->sin_family = AF_INET;
1180 dst4->sin_addr = ip->ip_dst;
1181
1182 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0))
1183 dst4->sin_addr = fdp->fd_ip;
1184
1185 dst4->sin_len = sizeof(*dst);
1186 rtalloc(ro);
1187 rt = ro->ro_rt;
1188 # endif
1189 if ((ifp == NULL) && (rt != NULL))
1190 ifp = rt->rt_ifp;
1191 if ((rt == NULL) || (ifp == NULL)) {
1192 #ifdef INET
1193 if (in_localaddr(ip->ip_dst))
1194 error = EHOSTUNREACH;
1195 else
1196 #endif
1197 error = ENETUNREACH;
1198 goto bad;
1199 }
1200
1201
1202 if (rt->rt_flags & RTF_GATEWAY)
1203 dst = rt->rt_gateway;
1204
1205 rt->rt_use++;
1206
1207 /*
1208 * For input packets which are being "fastrouted", they won't
1209 * go back through output filtering and miss their chance to get
1210 * NAT'd and counted. Duplicated packets aren't considered to be
1211 * part of the normal packet stream, so do not NAT them or pass
1212 * them through stateful checking, etc.
1213 */
1214 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) {
1215 sifp = fin->fin_ifp;
1216 fin->fin_ifp = ifp;
1217 fin->fin_out = 1;
1218 (void) ipf_acctpkt(fin, NULL);
1219 fin->fin_fr = NULL;
1220 if (!fr || !(fr->fr_flags & FR_RETMASK)) {
1221 u_32_t pass;
1222
1223 (void) ipf_state_check(fin, &pass);
1224 }
1225
1226 switch (ipf_nat_checkout(fin, NULL))
1227 {
1228 case 0 :
1229 break;
1230 case 1 :
1231 ip->ip_sum = 0;
1232 break;
1233 case -1 :
1234 error = -1;
1235 goto bad;
1236 break;
1237 }
1238
1239 fin->fin_ifp = sifp;
1240 fin->fin_out = 0;
1241 } else
1242 ip->ip_sum = 0;
1243 /*
1244 * If small enough for interface, can just send directly.
1245 */
1246 m_set_rcvif(m, ifp);
1247
1248 ip_len = ntohs(ip->ip_len);
1249 if (ip_len <= ifp->if_mtu) {
1250 # if defined(M_CSUM_IPv4)
1251 # if (__NetBSD_Version__ >= 105009999)
1252 if (ifp->if_csum_flags_tx & M_CSUM_IPv4)
1253 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1254 # else
1255 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1256 m->m_pkthdr.csuminfo |= M_CSUM_IPv4;
1257 # endif /* (__NetBSD_Version__ >= 105009999) */
1258 else if (ip->ip_sum == 0)
1259 ip->ip_sum = in_cksum(m, hlen);
1260 # else
1261 if (!ip->ip_sum)
1262 ip->ip_sum = in_cksum(m, hlen);
1263 # endif /* M_CSUM_IPv4 */
1264
1265 error = if_output_lock(ifp, ifp, m, dst, rt);
1266 goto done;
1267 }
1268
1269 /*
1270 * Too large for interface; fragment if possible.
1271 * Must be able to put at least 8 bytes per fragment.
1272 */
1273 ip_off = ntohs(ip->ip_off);
1274 if (ip_off & IP_DF) {
1275 error = EMSGSIZE;
1276 goto bad;
1277 }
1278 len = (ifp->if_mtu - hlen) &~ 7;
1279 if (len < 8) {
1280 error = EMSGSIZE;
1281 goto bad;
1282 }
1283
1284 {
1285 int mhlen, firstlen = len;
1286 struct mbuf **mnext = &m->m_act;
1287
1288 /*
1289 * Loop through length of segment after first fragment,
1290 * make new header and copy data of each part and link onto chain.
1291 */
1292 m0 = m;
1293 mhlen = sizeof (struct ip);
1294 for (off = hlen + len; off < ip_len; off += len) {
1295 # ifdef MGETHDR
1296 MGETHDR(m, M_DONTWAIT, MT_HEADER);
1297 # else
1298 MGET(m, M_DONTWAIT, MT_HEADER);
1299 # endif
1300 if (m == 0) {
1301 m = m0;
1302 error = ENOBUFS;
1303 goto bad;
1304 }
1305 m->m_data += max_linkhdr;
1306 mhip = mtod(m, struct ip *);
1307 bcopy((char *)ip, (char *)mhip, sizeof(*ip));
1308 #ifdef INET
1309 if (hlen > sizeof (struct ip)) {
1310 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
1311 IP_HL_A(mhip, mhlen >> 2);
1312 }
1313 #endif
1314 m->m_len = mhlen;
1315 mhip->ip_off = ((off - hlen) >> 3) + ip_off;
1316 if (off + len >= ip_len)
1317 len = ip_len - off;
1318 else
1319 mhip->ip_off |= IP_MF;
1320 mhip->ip_len = htons((u_short)(len + mhlen));
1321 m->m_next = m_copym(m0, off, len, M_DONTWAIT);
1322 if (m->m_next == 0) {
1323 error = ENOBUFS; /* ??? */
1324 goto sendorfree;
1325 }
1326 m->m_pkthdr.len = mhlen + len;
1327 m_reset_rcvif(m);
1328 mhip->ip_off = htons((u_short)mhip->ip_off);
1329 mhip->ip_sum = 0;
1330 #ifdef INET
1331 mhip->ip_sum = in_cksum(m, mhlen);
1332 #endif
1333 *mnext = m;
1334 mnext = &m->m_act;
1335 }
1336 /*
1337 * Update first fragment by trimming what's been copied out
1338 * and updating header, then send each fragment (in order).
1339 */
1340 m_adj(m0, hlen + firstlen - ip_len);
1341 ip->ip_len = htons((u_short)(hlen + firstlen));
1342 ip->ip_off = htons((u_short)IP_MF);
1343 ip->ip_sum = 0;
1344 #ifdef INET
1345 ip->ip_sum = in_cksum(m0, hlen);
1346 #endif
1347 sendorfree:
1348 for (m = m0; m; m = m0) {
1349 m0 = m->m_act;
1350 m->m_act = 0;
1351 if (error == 0) {
1352 # if __NetBSD_Version__ >= 799003200
1353 error = if_output_lock(ifp, ifp, m, dst, rt);
1354 # else
1355 KERNEL_LOCK(1, NULL);
1356 error = (*ifp->if_output)(ifp, m, dst, rt);
1357 KERNEL_UNLOCK_ONE(NULL);
1358 # endif
1359 } else {
1360 FREE_MB_T(m);
1361 }
1362 }
1363 }
1364 done:
1365 if (!error)
1366 softc->ipf_frouteok[0]++;
1367 else
1368 softc->ipf_frouteok[1]++;
1369
1370 # if __NetBSD_Version__ >= 499001100
1371 rtcache_unref(rt, ro);
1372 rtcache_free(ro);
1373 # else
1374 if (rt) {
1375 RTFREE(rt);
1376 }
1377 # endif
1378 return error;
1379 bad:
1380 if (error == EMSGSIZE) {
1381 sifp = fin->fin_ifp;
1382 code = fin->fin_icode;
1383 fin->fin_icode = ICMP_UNREACH_NEEDFRAG;
1384 fin->fin_ifp = ifp;
1385 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1);
1386 fin->fin_ifp = sifp;
1387 fin->fin_icode = code;
1388 }
1389 FREE_MB_T(m);
1390 goto done;
1391 #endif /* INET */
1392 }
1393
1394
1395 #if defined(USE_INET6)
1396 /*
1397  * This is the IPv6-specific fastroute code.  It doesn't clean up the mbufs
1398  * or ensure that it is an IPv6 packet that is being forwarded; those are
1399  * expected to be done by the caller (ipf_fastroute).
1400 */
1401 static int
1402 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin,
1403 frdest_t *fdp)
1404 {
1405 # if __NetBSD_Version__ >= 499001100
1406 struct route ip6route;
1407 const struct sockaddr *dst;
1408 union {
1409 struct sockaddr dst;
1410 struct sockaddr_in6 dst6;
1411 } u;
1412 struct route *ro;
1413 # else
1414 struct route_in6 ip6route;
1415 struct sockaddr_in6 *dst6;
1416 struct route_in6 *ro;
1417 # endif
1418 struct rtentry *rt;
1419 struct ifnet *ifp;
1420 u_long mtu;
1421 int error;
1422
1423 error = 0;
1424 ro = &ip6route;
1425
1426 if (fdp != NULL)
1427 ifp = fdp->fd_ptr;
1428 else
1429 ifp = fin->fin_ifp;
1430 memset(ro, 0, sizeof(*ro));
1431 # if __NetBSD_Version__ >= 499001100
1432 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6))
1433 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0);
1434 else
1435 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0);
1436 if ((error = in6_setscope(&u.dst6.sin6_addr, ifp,
1437 &u.dst6.sin6_scope_id)) != 0)
1438 return error;
1439 if ((error = sa6_embedscope(&u.dst6, 0)) != 0)
1440 return error;
1441
1442 dst = &u.dst;
1443 rtcache_setdst(ro, dst);
1444
1445 rt = rtcache_init(ro);
1446 if ((ifp == NULL) && (rt != NULL))
1447 ifp = rt->rt_ifp;
1448 # else
1449 dst6 = (struct sockaddr_in6 *)&ro->ro_dst;
1450 dst6->sin6_family = AF_INET6;
1451 dst6->sin6_len = sizeof(struct sockaddr_in6);
1452 dst6->sin6_addr = fin->fin_fi.fi_dst.in6;
1453 /* KAME */
1454 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr))
1455 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
1456
1457 if (fdp != NULL) {
1458 if (IP6_NOTZERO(&fdp->fd_ip6))
1459 dst6->sin6_addr = fdp->fd_ip6.in6;
1460 }
1461
1462 rtalloc((struct route *)ro);
1463
1464 if ((ifp == NULL) && (ro->ro_rt != NULL))
1465 ifp = ro->ro_rt->rt_ifp;
1466 rt = ro->ro_rt;
1467 # endif
1468 if ((rt == NULL) || (ifp == NULL)) {
1469
1470 error = EHOSTUNREACH;
1471 goto bad;
1472 }
1473
1474 {
1475 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU) \
1476 && defined(IPV6CTL_ACCEPT_RTADV)
1477 struct in6_ifextra *ife;
1478 # endif
1479 if (rt->rt_flags & RTF_GATEWAY)
1480 # if __NetBSD_Version__ >= 499001100
1481 dst = rt->rt_gateway;
1482 # else
1483 dst6 = (struct sockaddr_in6 *)rt->rt_gateway;
1484 # endif
1485 rt->rt_use++;
1486
1487 /* Determine path MTU. */
1488 # if (__NetBSD_Version__ <= 106009999)
1489 mtu = nd_ifinfo[ifp->if_index].linkmtu;
1490 # elif defined(IPV6CTL_ACCEPT_RTADV)
1491 # ifdef IN6_LINKMTU
1492 mtu = IN6_LINKMTU(ifp);
1493 # else
1494 ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
1495 mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
1496 # endif
1497 # else
1498 mtu = ifp->if_mtu;
1499 # endif
1500 if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
1501 # if __NetBSD_Version__ >= 499001100
1502 error = ip6_if_output(ifp, ifp, m0, satocsin6(dst), rt);
1503 # else
1504 error = nd6_output(ifp, ifp, m0, dst6, rt);
1505 # endif
1506 if (error)
1507 *mpp = NULL; /* m0 has been freed */
1508 } else {
1509 error = EMSGSIZE;
1510 }
1511 }
1512 bad:
1513 # if __NetBSD_Version__ >= 499001100
1514 rtcache_unref(rt, ro);
1515 rtcache_free(ro);
1516 # else
1517 if (ro->ro_rt != NULL) {
1518 RTFREE(((struct route *)ro)->ro_rt);
1519 }
1520 # endif
1521 return error;
1522 }
1523 #endif /* INET6 */
1524
1525
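/*
 * Reverse-path check: look up the route back to the packet's source
 * address and return non-zero if that route uses the interface the
 * packet arrived on.
 */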
1526 int
1527 ipf_verifysrc(fr_info_t *fin)
1528 {
1529 #if __NetBSD_Version__ >= 499001100
1530 union {
1531 struct sockaddr dst;
1532 struct sockaddr_in dst4;
1533 } u;
1534 struct rtentry *rt;
1535 #else
1536 struct sockaddr_in *dst;
1537 #endif
1538 struct route iproute;
1539 int rc;
1540
1541 #if __NetBSD_Version__ >= 499001100
1542 sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
1543 rtcache_setdst(&iproute, &u.dst);
1544 rt = rtcache_init(&iproute);
1545 if (rt == NULL)
1546 rc = 0;
1547 else
1548 rc = (fin->fin_ifp == rt->rt_ifp);
1549 rtcache_unref(rt, &iproute);
1550 rtcache_free(&iproute);
1551 #else
1552 dst = (struct sockaddr_in *)&iproute.ro_dst;
1553 dst->sin_len = sizeof(*dst);
1554 dst->sin_family = AF_INET;
1555 dst->sin_addr = fin->fin_src;
1556 rtalloc(&iproute);
1557 if (iproute.ro_rt == NULL)
1558 return 0;
1559 rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
1560 RTFREE(iproute.ro_rt);
1561 #endif
1562 return rc;
1563 }
1564
1565
1566 /*
1567 * return the first IP Address associated with an interface
1568 */
1569 int
1570 ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
1571 i6addr_t *inp, i6addr_t *inpmask)
1572 {
1573 #ifdef USE_INET6
1574 struct in6_addr *inp6 = NULL;
1575 #endif
1576 struct sockaddr *sock, *mask;
1577 struct sockaddr_in *sin;
1578 struct ifaddr *ifa;
1579 struct ifnet *ifp;
1580
1581 if ((ifptr == NULL) || (ifptr == (void *)-1))
1582 return -1;
1583
1584 ifp = ifptr;
1585 mask = NULL;
1586
1587 if (v == 4)
1588 inp->in4.s_addr = 0;
1589 #ifdef USE_INET6
1590 else if (v == 6)
1591 bzero((char *)inp, sizeof(*inp));
1592 #endif
1593
1594 ifa = IFADDR_READER_FIRST(ifp);
1595 sock = ifa ? ifa->ifa_addr : NULL;
1596 while (sock != NULL && ifa != NULL) {
1597 sin = (struct sockaddr_in *)sock;
1598 if ((v == 4) && (sin->sin_family == AF_INET))
1599 break;
1600 #ifdef USE_INET6
1601 if ((v == 6) && (sin->sin_family == AF_INET6)) {
1602 inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
1603 if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
1604 !IN6_IS_ADDR_LOOPBACK(inp6))
1605 break;
1606 }
1607 #endif
1608 ifa = IFADDR_READER_NEXT(ifa);
1609 if (ifa != NULL)
1610 sock = ifa->ifa_addr;
1611 }
1612 if (ifa == NULL || sock == NULL)
1613 return -1;
1614
1615 mask = ifa->ifa_netmask;
1616 if (atype == FRI_BROADCAST)
1617 sock = ifa->ifa_broadaddr;
1618 else if (atype == FRI_PEERADDR)
1619 sock = ifa->ifa_dstaddr;
1620
1621 #ifdef USE_INET6
1622 if (v == 6)
1623 return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
1624 (struct sockaddr_in6 *)mask,
1625 inp, inpmask);
1626 #endif
1627 return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
1628 (struct sockaddr_in *)mask,
1629 &inp->in4, &inpmask->in4);
1630 }
1631
1632
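/*
 * Generate a new TCP initial sequence number for this connection.
 * On 1.5T and newer kernels this delegates to tcp_new_iss1(); older
 * kernels hash the addresses and ports with MD5 and add a stepping
 * offset.
 */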
1633 u_32_t
1634 ipf_newisn(fr_info_t *fin)
1635 {
1636 #if __NetBSD_Version__ >= 105190000 /* 1.5T */
1637 size_t asz;
1638
1639 if (fin->fin_v == 4)
1640 asz = sizeof(struct in_addr);
1641 else if (fin->fin_v == 6)
1642 asz = sizeof(fin->fin_src);
1643 else /* XXX: no way to return error */
1644 return 0;
1645 #ifdef INET
1646 return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
1647 fin->fin_sport, fin->fin_dport, asz);
1648 #else
1649 return ENOSYS;
1650 #endif
1651 #else
1652 static int iss_seq_off = 0;
1653 u_char hash[16];
1654 u_32_t newiss;
1655 MD5_CTX ctx;
1656
1657 /*
1658 * Compute the base value of the ISS. It is a hash
1659 * of (saddr, sport, daddr, dport, secret).
1660 */
1661 MD5Init(&ctx);
1662
1663 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
1664 sizeof(fin->fin_fi.fi_src));
1665 MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
1666 sizeof(fin->fin_fi.fi_dst));
1667 MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));
1668
1669 MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));
1670
1671 MD5Final(hash, &ctx);
1672
1673 memcpy(&newiss, hash, sizeof(newiss));
1674
1675 /*
1676 * Now increment our "timer", and add it in to
1677 * the computed value.
1678 *
1679 * XXX Use `addin'?
1680 * XXX TCP_ISSINCR too large to use?
1681 */
1682 iss_seq_off += 0x00010000;
1683 newiss += iss_seq_off;
1684 return newiss;
1685 #endif
1686 }
1687
1688
1689 /* ------------------------------------------------------------------------ */
1690 /* Function: ipf_nextipid */
1691 /* Returns: int - 0 == success, -1 == error (packet should be dropped) */
1692 /* Parameters: fin(I) - pointer to packet information */
1693 /* */
1694 /* Returns the next IPv4 ID to use for this packet. */
1695 /* ------------------------------------------------------------------------ */
1696 u_short
1697 ipf_nextipid(fr_info_t *fin)
1698 {
1699 #ifdef USE_MUTEXES
1700 ipf_main_softc_t *softc = fin->fin_main_soft;
1701 #endif
1702 u_short id;
1703
1704 MUTEX_ENTER(&softc->ipf_rw);
1705 id = ipid++;
1706 MUTEX_EXIT(&softc->ipf_rw);
1707
1708 return id;
1709 }
1710
1711
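/*
 * Validate the layer 4 (TCP/UDP) checksum of an IPv4 packet.  Where the
 * receiving interface has checked it in hardware, trust the M_CSUM_*
 * flags in the mbuf and mark the packet FI_BAD if the hardware flagged
 * the sum as bad; otherwise fall back to ipf_checkl4sum().
 */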
1712 EXTERN_INLINE int
1713 ipf_checkv4sum(fr_info_t *fin)
1714 {
1715 #ifdef M_CSUM_TCP_UDP_BAD
1716 int manual, pflag, cflags, active;
1717 mb_t *m;
1718
1719 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1720 return 0;
1721
1722 if ((fin->fin_flx & FI_SHORT) != 0)
1723 return 1;
1724
1725 if (fin->fin_cksum != FI_CK_NEEDED)
1726 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1727
1728 manual = 0;
1729 m = fin->fin_m;
1730 if (m == NULL) {
1731 manual = 1;
1732 goto skipauto;
1733 }
1734
1735 switch (fin->fin_p)
1736 {
1737 case IPPROTO_UDP :
1738 pflag = M_CSUM_UDPv4;
1739 break;
1740 case IPPROTO_TCP :
1741 pflag = M_CSUM_TCPv4;
1742 break;
1743 default :
1744 pflag = 0;
1745 manual = 1;
1746 break;
1747 }
1748
1749 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1750 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1751 cflags = m->m_pkthdr.csum_flags & active;
1752
1753 if (pflag != 0) {
1754 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1755 fin->fin_flx |= FI_BAD;
1756 fin->fin_cksum = FI_CK_BAD;
1757 } else if (cflags == (pflag | M_CSUM_DATA)) {
1758 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
1759 fin->fin_flx |= FI_BAD;
1760 fin->fin_cksum = FI_CK_BAD;
1761 } else {
1762 fin->fin_cksum = FI_CK_SUMOK;
1763 }
1764 } else if (cflags == pflag) {
1765 fin->fin_cksum = FI_CK_SUMOK;
1766 } else {
1767 manual = 1;
1768 }
1769 }
1770 skipauto:
1771 if (manual != 0) {
1772 if (ipf_checkl4sum(fin) == -1) {
1773 fin->fin_flx |= FI_BAD;
1774 return -1;
1775 }
1776 }
1777 #else
1778 if (ipf_checkl4sum(fin) == -1) {
1779 fin->fin_flx |= FI_BAD;
1780 return -1;
1781 }
1782 #endif
1783 return 0;
1784 }
1785
1786
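/*
 * IPv6 counterpart of ipf_checkv4sum(): use the hardware checksum
 * offload result when one is present, otherwise verify in software
 * with ipf_checkl4sum().
 */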
1787 #ifdef USE_INET6
1788 EXTERN_INLINE int
1789 ipf_checkv6sum(fr_info_t *fin)
1790 {
1791 # ifdef M_CSUM_TCP_UDP_BAD
1792 int manual, pflag, cflags, active;
1793 mb_t *m;
1794
1795 if ((fin->fin_flx & FI_NOCKSUM) != 0)
1796 return 0;
1797
1798 if ((fin->fin_flx & FI_SHORT) != 0)
1799 return 1;
1800
1801 if (fin->fin_cksum != FI_CK_SUMOK)
1802 return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;
1803
1804
1805 manual = 0;
1806 m = fin->fin_m;
1807
1808 switch (fin->fin_p)
1809 {
1810 case IPPROTO_UDP :
1811 pflag = M_CSUM_UDPv6;
1812 break;
1813 case IPPROTO_TCP :
1814 pflag = M_CSUM_TCPv6;
1815 break;
1816 default :
1817 pflag = 0;
1818 manual = 1;
1819 break;
1820 }
1821
1822 active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
1823 active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
1824 cflags = m->m_pkthdr.csum_flags & active;
1825
1826 if (pflag != 0) {
1827 if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
1828 fin->fin_flx |= FI_BAD;
1829 } else if (cflags == (pflag | M_CSUM_DATA)) {
1830 if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
1831 fin->fin_flx |= FI_BAD;
1832 } else if (cflags == pflag) {
1833 ;
1834 } else {
1835 manual = 1;
1836 }
1837 }
1838 if (manual != 0) {
1839 if (ipf_checkl4sum(fin) == -1) {
1840 fin->fin_flx |= FI_BAD;
1841 return -1;
1842 }
1843 }
1844 # else
1845 if (ipf_checkl4sum(fin) == -1) {
1846 fin->fin_flx |= FI_BAD;
1847 return -1;
1848 }
1849 # endif
1850 return 0;
1851 }
1852 #endif /* USE_INET6 */
1853
1854
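/*
 * Return the number of data bytes in an mbuf chain, taking it from the
 * packet header when M_PKTHDR is set and summing m_len otherwise.
 */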
1855 size_t
1856 mbufchainlen(struct mbuf *m0)
1857 {
1858 size_t len;
1859
1860 if ((m0->m_flags & M_PKTHDR) != 0) {
1861 len = m0->m_pkthdr.len;
1862 } else {
1863 struct mbuf *m;
1864
1865 for (m = m0, len = 0; m != NULL; m = m->m_next)
1866 len += m->m_len;
1867 }
1868 return len;
1869 }
1870
1871
1872 /* ------------------------------------------------------------------------ */
1873 /* Function: ipf_pullup */
1874 /* Returns: NULL == pullup failed, else pointer to protocol header */
1875 /* Parameters: xmin(I)- pointer to buffer where data packet starts */
1876 /* fin(I) - pointer to packet information */
1877 /* len(I) - number of bytes to pullup */
1878 /* */
1879 /* Attempt to move at least len bytes (from the start of the buffer) into a */
1880 /* single buffer for ease of access. Operating system native functions are */
1881 /* used to manage buffers - if necessary. If the entire packet ends up in */
1882 /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */
1883 /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */
1884 /* and ONLY if the pullup succeeds. */
1885 /* */
1886 /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */
1887 /* of buffers that starts at *fin->fin_mp. */
1888 /* ------------------------------------------------------------------------ */
1889 void *
1890 ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
1891 {
1892 int dpoff, ipoff;
1893 mb_t *m = xmin;
1894 char *ip;
1895
1896 if (m == NULL)
1897 return NULL;
1898
1899 ip = (char *)fin->fin_ip;
1900 if ((fin->fin_flx & FI_COALESCE) != 0)
1901 return ip;
1902
1903 ipoff = fin->fin_ipoff;
1904 if (fin->fin_dp != NULL)
1905 dpoff = (char *)fin->fin_dp - (char *)ip;
1906 else
1907 dpoff = 0;
1908
1909 if (M_LEN(m) < len) {
1910 mb_t *n = *fin->fin_mp;
1911 /*
1912 * Assume that M_PKTHDR is set and just work with what is left
1913 * rather than check..
1914 * Should not make any real difference, anyway.
1915 */
1916 if (m != n) {
1917 /*
1918 * Record the mbuf that points to the mbuf that we're
1919 * about to go to work on so that we can update the
1920 * m_next appropriately later.
1921 */
1922 for (; n->m_next != m; n = n->m_next)
1923 ;
1924 } else {
1925 n = NULL;
1926 }
1927
1928 #ifdef MHLEN
1929 if (len > MHLEN)
1930 #else
1931 if (len > MLEN)
1932 #endif
1933 {
1934 #ifdef HAVE_M_PULLDOWN
1935 if (m_pulldown(m, 0, len, NULL) == NULL)
1936 m = NULL;
1937 #else
1938 FREE_MB_T(*fin->fin_mp);
1939 m = NULL;
1940 n = NULL;
1941 #endif
1942 } else
1943 {
1944 m = m_pullup(m, len);
1945 }
1946 if (n != NULL)
1947 n->m_next = m;
1948 if (m == NULL) {
1949 /*
1950 * When n is non-NULL, it indicates that m pointed to
1951 * a sub-chain (tail) of the mbuf and that the head
1952 * of this chain has not yet been free'd.
1953 */
1954 if (n != NULL) {
1955 FREE_MB_T(*fin->fin_mp);
1956 }
1957
1958 *fin->fin_mp = NULL;
1959 fin->fin_m = NULL;
1960 return NULL;
1961 }
1962
1963 if (n == NULL)
1964 *fin->fin_mp = m;
1965
1966 while (M_LEN(m) == 0) {
1967 m = m->m_next;
1968 }
1969 fin->fin_m = m;
1970 ip = MTOD(m, char *) + ipoff;
1971
1972 fin->fin_ip = (ip_t *)ip;
1973 if (fin->fin_dp != NULL)
1974 fin->fin_dp = (char *)fin->fin_ip + dpoff;
1975 if (fin->fin_fraghdr != NULL)
1976 fin->fin_fraghdr = (char *)ip +
1977 ((char *)fin->fin_fraghdr -
1978 (char *)fin->fin_ip);
1979 }
1980
1981 if (len == fin->fin_plen)
1982 fin->fin_flx |= FI_COALESCE;
1983 return ip;
1984 }
1985
1986
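/*
 * Re-inject a packet that ipf has generated or rewritten: inbound
 * packets are queued onto ip_pktq for normal IP input processing,
 * outbound packets are handed straight to ip_output().
 */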
1987 int
1988 ipf_inject(fr_info_t *fin, mb_t *m)
1989 {
1990 int error;
1991
1992 if (fin->fin_out == 0) {
1993 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1994 FREE_MB_T(m);
1995 error = ENOBUFS;
1996 } else {
1997 error = 0;
1998 }
1999 } else {
2000 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
2001 }
2002 return error;
2003 }
2004
2005
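/*
 * Return a 32 bit random number, using cprng_fast32() when available
 * and arc4random() otherwise.
 */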
2006 u_32_t
2007 ipf_random(void)
2008 {
2009 int number;
2010
2011 #ifdef _CPRNG_H
2012 number = cprng_fast32();
2013 #else
2014 number = arc4random();
2015 #endif
2016 return number;
2017 }
2018
2019
2020 /*
2021 * routines below for saving IP headers to buffer
2022 */
2023 static int ipfopen(dev_t dev, int flags
2024 #if (NetBSD >= 199511)
2025 , int devtype, PROC_T *p
2026 #endif
2027 )
2028 {
2029 u_int unit = GET_MINOR(dev);
2030 int error;
2031
2032 if (IPL_LOGMAX < unit) {
2033 error = ENXIO;
2034 } else {
2035 switch (unit)
2036 {
2037 case IPL_LOGIPF :
2038 case IPL_LOGNAT :
2039 case IPL_LOGSTATE :
2040 case IPL_LOGAUTH :
2041 case IPL_LOGLOOKUP :
2042 case IPL_LOGSYNC :
2043 #ifdef IPFILTER_SCAN
2044 case IPL_LOGSCAN :
2045 #endif
2046 error = 0;
2047 break;
2048 default :
2049 error = ENXIO;
2050 break;
2051 }
2052 }
2053 #if (__NetBSD_Version__ >= 799003000)
2054 if (error == 0) {
2055 mutex_enter(&ipf_ref_mutex);
2056 ipf_active = 1;
2057 mutex_exit(&ipf_ref_mutex);
2058 }
2059 #endif
2060 return error;
2061 }
2062
2063
2064 static int ipfclose(dev_t dev, int flags
2065 #if (NetBSD >= 199511)
2066 , int devtype, PROC_T *p
2067 #endif
2068 )
2069 {
2070 u_int unit = GET_MINOR(dev);
2071
2072 if (IPL_LOGMAX < unit)
2073 return ENXIO;
2074 else {
2075 #if (__NetBSD_Version__ >= 799003000)
2076 mutex_enter(&ipf_ref_mutex);
2077 ipf_active = 0;
2078 mutex_exit(&ipf_ref_mutex);
2079 #endif
2080 return 0;
2081 }
2082 }
2083
2084 /*
2085 * ipfread/ipflog
2086 * both of these must operate with at least splnet() lest they be
2087  * called during packet processing and cause an inconsistency to appear in
2088 * the filter lists.
2089 */
2090 static int ipfread(dev_t dev, struct uio *uio, int ioflag)
2091 {
2092
2093 if (ipfmain.ipf_running < 1) {
2094 ipfmain.ipf_interror = 130006;
2095 return EIO;
2096 }
2097
2098 if (GET_MINOR(dev) == IPL_LOGSYNC)
2099 return ipf_sync_read(&ipfmain, uio);
2100
2101 #ifdef IPFILTER_LOG
2102 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio);
2103 #else
2104 ipfmain.ipf_interror = 130007;
2105 return ENXIO;
2106 #endif
2107 }
2108
2109
2110 /*
2111 * ipfwrite
2112 * both of these must operate with at least splnet() lest they be
2113  * called during packet processing and cause an inconsistency to appear in
2114 * the filter lists.
2115 */
2116 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag)
2117 {
2118
2119 if (ipfmain.ipf_running < 1) {
2120 ipfmain.ipf_interror = 130008;
2121 return EIO;
2122 }
2123
2124 if (GET_MINOR(dev) == IPL_LOGSYNC)
2125 return ipf_sync_write(&ipfmain, uio);
2126 ipfmain.ipf_interror = 130009;
2127 return ENXIO;
2128 }
2129
2130
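/*
 * poll(2) handler for the ipf devices: report readability for the log,
 * auth and sync units and writability for the sync unit, otherwise
 * record the caller for a later wakeup via selrecord().
 */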
2131 static int ipfpoll(dev_t dev, int events, PROC_T *p)
2132 {
2133 u_int unit = GET_MINOR(dev);
2134 int revents = 0;
2135
2136 if (IPL_LOGMAX < unit) {
2137 ipfmain.ipf_interror = 130010;
2138 return ENXIO;
2139 }
2140
2141 switch (unit)
2142 {
2143 case IPL_LOGIPF :
2144 case IPL_LOGNAT :
2145 case IPL_LOGSTATE :
2146 #ifdef IPFILTER_LOG
2147 if ((events & (POLLIN | POLLRDNORM)) &&
2148 ipf_log_canread(&ipfmain, unit))
2149 revents |= events & (POLLIN | POLLRDNORM);
2150 #endif
2151 break;
2152 case IPL_LOGAUTH :
2153 if ((events & (POLLIN | POLLRDNORM)) &&
2154 ipf_auth_waiting(&ipfmain))
2155 revents |= events & (POLLIN | POLLRDNORM);
2156 break;
2157 case IPL_LOGSYNC :
2158 if ((events & (POLLIN | POLLRDNORM)) &&
2159 ipf_sync_canread(&ipfmain))
2160 revents |= events & (POLLIN | POLLRDNORM);
2161 if ((events & (POLLOUT | POLLWRNORM)) &&
2162 ipf_sync_canwrite(&ipfmain))
2163 revents |= events & (POLLOUT | POLLWRNORM);
2164 break;
2165 case IPL_LOGSCAN :
2166 case IPL_LOGLOOKUP :
2167 default :
2168 break;
2169 }
2170
2171 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0)))
2172 selrecord(p, &ipfmain.ipf_selwait[unit]);
2173 return revents;
2174 }
2175
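/*
 * Advance past the IP header, checksum the remainder of the packet with
 * in_cksum() and fold the result together with the partial sum supplied
 * by the caller using ones-complement addition, e.g. a 32 bit sum of
 * 0x1ffff folds to 0xffff + 0x1 = 0x10000 and then to 0x0001.
 */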
2176 u_int
2177 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum)
2178 {
2179 struct mbuf *m;
2180 u_int sum2;
2181 int off;
2182
2183 m = fin->fin_m;
2184 off = (char *)fin->fin_dp - (char *)fin->fin_ip;
2185 m->m_data += hlen;
2186 m->m_len -= hlen;
2187 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off);
2188 m->m_len += hlen;
2189 m->m_data -= hlen;
2190
2191 /*
2192 * Both sum and sum2 are partial sums, so combine them together.
2193 */
2194 sum += ~sum2 & 0xffff;
2195 while (sum > 0xffff)
2196 sum = (sum & 0xffff) + (sum >> 16);
2197 sum2 = ~sum & 0xffff;
2198 return sum2;
2199 }
2200
2201 #if (__NetBSD_Version__ >= 799003000)
2202
2203 /* NetBSD module interface */
2204
2205 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter");
2206
2207 static int ipl_init(void *);
2208 static int ipl_fini(void *);
2209 static int ipl_modcmd(modcmd_t, void *);
2210
2211 #ifdef _MODULE
2212 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1;
2213 #endif
2214
2215 static int
2216 ipl_modcmd(modcmd_t cmd, void *opaque)
2217 {
2218
2219 switch (cmd) {
2220 case MODULE_CMD_INIT:
2221 return ipl_init(opaque);
2222 case MODULE_CMD_FINI:
2223 return ipl_fini(opaque);
2224 default:
2225 return ENOTTY;
2226 }
2227 }
2228
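/*
 * Module initialisation: register the kauth listener, load and create
 * the ipf subsystems, set up the reference mutex and, when built as a
 * module, attach the character device.
 */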
2229 static int
2230 ipl_init(void *opaque)
2231 {
2232 int error;
2233
2234 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
2235 ipf_listener_cb, NULL);
2236
2237 if ((error = ipf_load_all()) != 0)
2238 return error;
2239
2240 if (ipf_create_all(&ipfmain) == NULL) {
2241 ipf_unload_all();
2242 return ENODEV;
2243 }
2244
2245 /* Initialize our mutex and reference count */
2246 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE);
2247 ipf_active = 0;
2248
2249 #ifdef _MODULE
2250 /*
2251 * Insert ourself into the cdevsw list.
2252 */
2253 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj);
2254 if (error)
2255 ipl_fini(opaque);
2256 #endif
2257
2258 return error;
2259 }
2260
2261 static int
2262 ipl_fini(void *opaque)
2263 {
2264
2265 #ifdef _MODULE
2266 devsw_detach(NULL, &ipl_cdevsw);
2267 #endif
2268
2269 /*
2270 * Grab the mutex, verify that there are no references
2271 * and that there are no running filters. If either
2272 * of these exists, reinsert our cdevsw entry and return
2273 * an error.
2274 */
2275 mutex_enter(&ipf_ref_mutex);
2276 if (ipf_active != 0 || ipfmain.ipf_running > 0) {
2277 #ifdef _MODULE
2278 (void)devsw_attach("ipl", NULL, &ipl_bmaj,
2279 &ipl_cdevsw, &ipl_cmaj);
2280 #endif
2281 mutex_exit(&ipf_ref_mutex);
2282 return EBUSY;
2283 }
2284
2285 /* Clean up the rest of our state before being unloaded */
2286
2287 mutex_exit(&ipf_ref_mutex);
2288 mutex_destroy(&ipf_ref_mutex);
2289 ipf_destroy_all(&ipfmain);
2290 ipf_unload_all();
2291 kauth_unlisten_scope(ipf_listener);
2292
2293 return 0;
2294 }
2295 #endif /* (__NetBSD_Version__ >= 799003000) */
2296