/*	$NetBSD: ip_fil_netbsd.c,v 1.38 2023/06/24 05:16:15 msaitoh Exp $	*/

/*
 * Copyright (C) 2012 by Darren Reed.
 *
 * See the IPFILTER.LICENCE file for details on licencing.
 */
#if !defined(lint)
#if defined(__NetBSD__)
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_fil_netbsd.c,v 1.38 2023/06/24 05:16:15 msaitoh Exp $");
#else
static const char sccsid[] = "@(#)ip_fil.c	2.41 6/5/96 (C) 1993-2000 Darren Reed";
static const char rcsid[] = "@(#)Id: ip_fil_netbsd.c,v 1.1.1.2 2012/07/22 13:45:17 darrenr Exp";
#endif
#endif

#if defined(KERNEL) || defined(_KERNEL)
# undef KERNEL
# undef _KERNEL
# define KERNEL	1
# define _KERNEL	1
#endif
#include <sys/param.h>
#if (NetBSD >= 199905) && !defined(IPFILTER_LKM)
# if (__NetBSD_Version__ >= 799003000)
#  ifdef _KERNEL_OPT
#   include "opt_ipsec.h"
#  endif
# else
#  include "opt_ipsec.h"
# endif
#endif
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/select.h>
#if (NetBSD > 199609)
# include <sys/dirent.h>
#else
# include <sys/dir.h>
#endif
#if (__NetBSD_Version__ >= 599005900)
# include <sys/cprng.h>
#endif
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/poll.h>
#if (__NetBSD_Version__ >= 399002000)
# include <sys/kauth.h>
#endif
#if (__NetBSD_Version__ >= 799003000)
#include <sys/module.h>
#include <sys/mutex.h>
#endif
#if defined(__NetBSD__)
#include <netinet/in_offload.h>
#endif

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#if __NetBSD_Version__ >= 105190000	/* 1.5T */
# include <netinet/tcp_timer.h>
# include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include "netinet/ip_compat.h"
#ifdef USE_INET6
# include <netinet/icmp6.h>
# if (__NetBSD_Version__ >= 106000000)
#  include <netinet6/nd6.h>
# endif
# if __NetBSD_Version__ >= 499001100
#  include <netinet6/scope6_var.h>
#  include <netinet6/in6_offload.h>
# endif
#endif
#include "netinet/ip_fil.h"
#include "netinet/ip_nat.h"
#include "netinet/ip_frag.h"
#include "netinet/ip_state.h"
#include "netinet/ip_proxy.h"
#include "netinet/ip_auth.h"
#include "netinet/ip_sync.h"
#include "netinet/ip_lookup.h"
#include "netinet/ip_dstlist.h"
#ifdef IPFILTER_SCAN
#include "netinet/ip_scan.h"
#endif
#include <sys/md5.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#ifdef INET
extern int ip_optcopy(struct ip *, struct ip *);
#endif

#ifdef IPFILTER_M_IPFILTER
MALLOC_DEFINE(M_IPFILTER, "IP Filter", "IP Filter packet filter data structures");
#endif

#if __NetBSD_Version__ >= 105009999
# define csuminfo csum_flags
#endif

#if __NetBSD_Version__ < 200000000
extern struct protosw inetsw[];
#endif

#if (__NetBSD_Version__ >= 599002000)
static kauth_listener_t ipf_listener;
#endif

#if (__NetBSD_Version__ < 399001400)
extern int ip6_getpmtu(struct route_in6 *, struct route_in6 *,
    struct ifnet *, struct in6_addr *, u_long *, int *);
#endif
#if (NetBSD >= 199511)
static int ipfopen(dev_t dev,
    int flags, int devtype, PROC_T *p);
static int ipfclose(dev_t dev, int flags, int devtype, PROC_T *p);
#else
# if (__NetBSD_Version__ >= 399001400)
static int ipfopen(dev_t dev, int flags, struct lwp *);
static int ipfclose(dev_t dev, int flags, struct lwp *);
# else
static int ipfopen(dev_t dev, int flags);
static int ipfclose(dev_t dev, int flags);
# endif /* __NetBSD_Version__ >= 399001400 */
#endif
static int ipfread(dev_t, struct uio *, int ioflag);
static int ipfwrite(dev_t, struct uio *, int ioflag);
static int ipfpoll(dev_t, int events, PROC_T *);
static void ipf_timer_func(void *ptr);

const struct cdevsw ipl_cdevsw = {
	.d_open = ipfopen,
	.d_close = ipfclose,
	.d_read = ipfread,
	.d_write = ipfwrite,
	.d_ioctl = ipfioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ipfpoll,
	.d_mmap = nommap,
#if (__NetBSD_Version__ >= 200000000)
	.d_kqfilter = nokqfilter,
#endif
	.d_discard = nodiscard,
#ifdef D_OTHER
	.d_flag = D_OTHER
#else
	.d_flag = 0
#endif
};
#if (__NetBSD_Version__ >= 799003000)
kmutex_t ipf_ref_mutex;
int ipf_active;
#endif

ipf_main_softc_t ipfmain;

static u_short ipid = 0;
static int (*ipf_savep)(void *, ip_t *, int, void *, int, struct mbuf **);
static int ipf_send_ip(fr_info_t *, mb_t *);
#ifdef USE_INET6
static int ipf_fastroute6(struct mbuf *, struct mbuf **,
    fr_info_t *, frdest_t *);
#endif

#if defined(NETBSD_PF)
# include <net/pfil.h>
/*
 * We provide the ipf_checkp name just to minimize changes later.
 */
int (*ipf_checkp)(void *, ip_t *ip, int hlen, void *ifp, int out, mb_t **mp);
#endif /* NETBSD_PF */

#if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
# include <net/pfil.h>

static int ipf_check_wrapper(void *, struct mbuf **, struct ifnet *, int);

static int
ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	struct ip *ip;
	int rv, hlen;

#if __NetBSD_Version__ >= 200080000
	/*
	 * ensure that mbufs are writable beforehand
	 * as it's assumed by ipf code.
	 * XXX inefficient
	 */
	int error = m_makewritable(mp, 0, M_COPYALL, M_DONTWAIT);

	if (error) {
		m_freem(*mp);
		*mp = NULL;
		return error;
	}
#endif
	ip = mtod(*mp, struct ip *);
	hlen = ip->ip_hl << 2;

#ifdef INET
#if defined(M_CSUM_TCPv4)
	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here. For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			in_undefer_cksum_tcpudp(*mp);
			(*mp)->m_pkthdr.csum_flags &=
			    ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
		}
	}
#endif /* M_CSUM_TCPv4 */
#endif /* INET */

	/*
	 * Note, we don't need to update the checksum, because
	 * it has already been verified.
	 */
	rv = ipf_check(&ipfmain, ip, hlen, ifp, (dir == PFIL_OUT), mp);

	return (rv);
}

# ifdef USE_INET6
#  include <netinet/ip6.h>

static int ipf_check_wrapper6(void *, struct mbuf **, struct ifnet *, int);

static int
ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
#if defined(INET6)
# if defined(M_CSUM_TCPv6) && (__NetBSD_Version__ > 200000000)
	/*
	 * If the packet is out-bound, we can't delay checksums
	 * here.
	 * For in-bound, the checksum has already been
	 * validated.
	 */
	if (dir == PFIL_OUT) {
		if ((*mp)->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
# if (__NetBSD_Version__ > 399000600)
			in6_undefer_cksum_tcpudp(*mp);
# endif
			(*mp)->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6|
			    M_CSUM_UDPv6);
		}
	}
# endif
#endif /* INET6 */

	return (ipf_check(&ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr),
	    ifp, (dir == PFIL_OUT), mp));
}
# endif


# if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)

#  if (__NetBSD_Version__ >= 799000400)

static void ipf_pfilsync(void *, unsigned long, void *);

static void
ipf_pfilsync(void *hdr, unsigned long cmd, void *arg2)
{
	/*
	 * The interface pointer is useless for create (we have nothing to
	 * compare it to) and at detach, the interface name is still in the
	 * list of active NICs (albeit, down, but that's not any real
	 * indicator) and doing ifunit() on the name will still return the
	 * pointer, so it's not much use then, either.
	 */
	ipf_sync(&ipfmain, NULL);
}

#  else

static int ipf_pfilsync(void *, struct mbuf **, struct ifnet *, int);

static int
ipf_pfilsync(void *hdr, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	ipf_sync(&ipfmain, NULL);
	return 0;
}

#  endif
# endif

#endif /* __NetBSD_Version__ >= 105110000 */


#if defined(IPFILTER_LKM)
int
ipf_identify(s)
	char *s;
{
	if (strcmp(s, "ipl") == 0)
		return 1;
	return 0;
}
#endif /* IPFILTER_LKM */

#if (__NetBSD_Version__ >= 599002000)
static int
ipf_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)(uintptr_t)arg0;

	if (action != KAUTH_NETWORK_FIREWALL)
		return result;

	/* These must have come from device context.
	 */
	if ((req == KAUTH_REQ_NETWORK_FIREWALL_FW) ||
	    (req == KAUTH_REQ_NETWORK_FIREWALL_NAT))
		result = KAUTH_RESULT_ALLOW;

	return result;
}
#endif

/*
 * Try to detect the case when compiling for NetBSD with pseudo-device
 */
void
ipfilterattach(int count)
{

#if (__NetBSD_Version__ >= 799003000)
	return;
#else
#if (__NetBSD_Version__ >= 599002000)
	ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    ipf_listener_cb, NULL);
#endif

	if (ipf_load_all() == 0)
		(void) ipf_create_all(&ipfmain);
#endif
}


int
ipfattach(ipf_main_softc_t *softc)
{
	SPL_INT(s);
#if (__NetBSD_Version__ >= 499005500)
	int i;
#endif
#if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
	int error = 0;
# if defined(__NetBSD_Version__) && (__NetBSD_Version__ >= 105110000)
	pfil_head_t *ph_inet;
#  ifdef USE_INET6
	pfil_head_t *ph_inet6;
#  endif
#  if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
	pfil_head_t *ph_ifsync;
#  endif
# endif
#endif

	SPL_NET(s);
	if ((softc->ipf_running > 0) || (ipf_checkp == ipf_check)) {
		printf("IP Filter: already initialized\n");
		SPL_X(s);
		IPFERROR(130017);
		return EBUSY;
	}

	if (ipf_init_all(softc) < 0) {
		SPL_X(s);
		IPFERROR(130015);
		return EIO;
	}

#ifdef NETBSD_PF
# if (__NetBSD_Version__ >= 104200000)
#  if __NetBSD_Version__ >= 105110000
	ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
#   ifdef USE_INET6
	ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
#   endif
#   if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
	ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
#   endif

	if (ph_inet == NULL
#   ifdef USE_INET6
	    && ph_inet6 == NULL
#   endif
#   if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
	    && ph_ifsync == NULL
#   endif
	    ) {
		SPL_X(s);
		IPFERROR(130016);
		return ENODEV;
	}

	if (ph_inet != NULL)
		error = pfil_add_hook((void *)ipf_check_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = 0;
#  else
	error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
	    &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
#  endif
	if (error) {
		IPFERROR(130013);
		goto pfil_error;
	}
# else
	pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
# endif

# ifdef USE_INET6
#  if __NetBSD_Version__ >= 105110000
	if (ph_inet6 != NULL)
		error = pfil_add_hook((void *)ipf_check_wrapper6, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = 0;
	if (error) {
		pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
		ipfmain.ipf_interror = 130014;
		goto pfil_error;
	}
#  else
	error = pfil_add_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
	    &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
	if (error) {
		pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
		    &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
		IPFERROR(130014);
		goto pfil_error;
	}
#  endif
# endif

# if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
	if (ph_ifsync != NULL)
#if (__NetBSD_Version__ >= 799000400)
		(void) pfil_add_ihook((void *)ipf_pfilsync, NULL,
		    PFIL_IFNET, ph_ifsync);
#else
		(void) pfil_add_hook((void *)ipf_pfilsync, NULL,
		    PFIL_IFNET, ph_ifsync);
#endif
# endif
#endif

#if (__NetBSD_Version__ >= 499005500)
	for (i = 0; i <
	    IPL_LOGSIZE; i++)
		selinit(&ipfmain.ipf_selwait[i]);
#else
	bzero((char *)ipfmain.ipf_selwait, sizeof(ipfmain.ipf_selwait));
#endif
	ipf_savep = ipf_checkp;
	ipf_checkp = ipf_check;

#ifdef INET
	if (softc->ipf_control_forwarding & 1)
		ipforwarding = 1;
#endif

	ipid = 0;

	SPL_X(s);

#if (__NetBSD_Version__ >= 104010000)
# if (__NetBSD_Version__ >= 499002000)
	callout_init(&softc->ipf_slow_ch, 0);
# else
	callout_init(&softc->ipf_slow_ch);
# endif
	callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT,
	    ipf_timer_func, softc);
#else
	timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
#endif

	return 0;

#if __NetBSD_Version__ >= 105110000
pfil_error:
	SPL_X(s);
	ipf_fini_all(softc);
	return error;
#endif
}

static void
ipf_timer_func(void *ptr)
{
	ipf_main_softc_t *softc = ptr;
	SPL_INT(s);

	SPL_NET(s);
	READ_ENTER(&softc->ipf_global);

	if (softc->ipf_running > 0)
		ipf_slowtimer(softc);

	if (softc->ipf_running == -1 || softc->ipf_running == 1) {
#if NETBSD_GE_REV(104240000)
		callout_reset(&softc->ipf_slow_ch, hz / 2,
		    ipf_timer_func, softc);
#else
		timeout(ipf_timer_func, softc,
		    (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT);
#endif
	}
	RWLOCK_EXIT(&softc->ipf_global);
	SPL_X(s);
}


/*
 * Disable the filter by removing the hooks from the IP input/output
 * stream.
 */
int
ipfdetach(ipf_main_softc_t *softc)
{
	SPL_INT(s);
#if (__NetBSD_Version__ >= 499005500)
	int i;
#endif
#if defined(NETBSD_PF) && (__NetBSD_Version__ >= 104200000)
	int error = 0;
# if __NetBSD_Version__ >= 105150000
	pfil_head_t *ph_inet = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET);
#  ifdef USE_INET6
	pfil_head_t *ph_inet6 = pfil_head_get(PFIL_TYPE_AF, (void *)AF_INET6);
#  endif
#  if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
	struct pfil_head *ph_ifsync = pfil_head_get(PFIL_TYPE_IFNET, 0);
#  endif
# endif
#endif

	SPL_NET(s);

#if (__NetBSD_Version__ >= 104010000)
	if (softc->ipf_running > 0)
		callout_stop(&softc->ipf_slow_ch);
#else
	untimeout(ipf_slowtimer, NULL);
#endif /* NetBSD */

	ipf_checkp = ipf_savep;
	(void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE|FR_INACTIVE);
	(void) ipf_flush(softc, IPL_LOGIPF, FR_INQUE|FR_OUTQUE);

#ifdef INET
	if (softc->ipf_control_forwarding & 2)
		ipforwarding = 0;
#endif

#ifdef NETBSD_PF
# if (__NetBSD_Version__ >= 104200000)
#  if __NetBSD_Version__ >= 105110000
#   if defined(PFIL_TYPE_IFNET) && defined(PFIL_IFNET)
#    if __NetBSD_Version__ >= 799000400
	(void) pfil_remove_ihook((void *)ipf_pfilsync, NULL,
	    PFIL_IFNET, ph_ifsync);
#    else
	(void) pfil_remove_hook((void *)ipf_pfilsync, NULL,
	    PFIL_IFNET, ph_ifsync);
#    endif
#   endif

	if (ph_inet != NULL)
		error = pfil_remove_hook((void *)ipf_check_wrapper, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet);
	else
		error = 0;
#  else
	error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
	    &inetsw[ip_protox[IPPROTO_IP]].pr_pfh);
#  endif
	if (error) {
		SPL_X(s);
		IPFERROR(130011);
		return error;
	}
# else
	pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT);
# endif
# ifdef USE_INET6
#  if __NetBSD_Version__ >= 105110000
	if (ph_inet6 != NULL)
		error = pfil_remove_hook((void *)ipf_check_wrapper6, NULL,
		    PFIL_IN|PFIL_OUT, ph_inet6);
	else
		error = 0;
#  else
	error = pfil_remove_hook((void *)ipf_check, PFIL_IN|PFIL_OUT,
	    &inetsw[ip_protox[IPPROTO_IPV6]].pr_pfh);
#  endif
	if (error) {
		SPL_X(s);
		IPFERROR(130012);
		return error;
	}
# endif
#endif
	SPL_X(s);

#if (__NetBSD_Version__ >= 499005500)
	for (i = 0; i < IPL_LOGSIZE; i++)
		seldestroy(&ipfmain.ipf_selwait[i]);
#endif

	ipf_fini_all(softc);

	return 0;
}


/*
 * Filter ioctl interface.
 */
int
ipfioctl(dev_t dev, u_long cmd,
#if (__NetBSD_Version__ >= 499001000)
    void *data,
#else
    caddr_t data,
#endif
    int mode
#if (NetBSD >= 199511)
# if (__NetBSD_Version__ >= 399001400)
    , struct lwp *p
#  if (__NetBSD_Version__ >= 399002000)
#   define UID(l)	kauth_cred_getuid((l)->l_cred)
#  else
#   define UID(l)	((l)->l_proc->p_cred->p_ruid)
#  endif
# else
    , struct proc *p
#  define UID(p)	((p)->p_cred->p_ruid)
# endif
#endif
    )
{
	int error = 0, unit = 0;
	SPL_INT(s);

#if (__NetBSD_Version__ >= 399002000)
	if ((mode & FWRITE) &&
	    kauth_authorize_network(p->l_cred, KAUTH_NETWORK_FIREWALL,
	    KAUTH_REQ_NETWORK_FIREWALL_FW, NULL,
	    NULL, NULL)) {
		ipfmain.ipf_interror = 130005;
		return EPERM;
	}
#else
	if ((securelevel >= 2) && (mode & FWRITE)) {
		ipfmain.ipf_interror = 130001;
		return EPERM;
	}
#endif

	unit = GET_MINOR(dev);
	if ((IPL_LOGMAX < unit) || (unit < 0)) {
		ipfmain.ipf_interror = 130002;
		return ENXIO;
	}

	if (ipfmain.ipf_running <= 0) {
		if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) {
			ipfmain.ipf_interror = 130003;
			return EIO;
		}
		if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET &&
		    cmd != SIOCIPFSET && cmd != SIOCFRENB &&
		    cmd != SIOCGETFS && cmd != SIOCGETFF &&
		    cmd != SIOCIPFINTERROR) {
			ipfmain.ipf_interror = 130004;
			return EIO;
		}
	}

	SPL_NET(s);

	error = ipf_ioctlswitch(&ipfmain, unit, data, cmd, mode, UID(p), p);
	if (error != -1) {
		SPL_X(s);
		return error;
	}

	SPL_X(s);
	return error;
}


/*
 * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that
 * requires a large amount of setting up and isn't any more efficient.
 */
int
ipf_send_reset(fr_info_t *fin)
{
	struct tcphdr *tcp, *tcp2;
	int tlen = 0, hlen;
	struct mbuf *m;
#ifdef USE_INET6
	ip6_t *ip6;
#endif
	ip_t *ip;

	tcp = fin->fin_dp;
	if (tcp->th_flags & TH_RST)
		return -1;		/* feedback loop */

	if (ipf_checkl4sum(fin) == -1)
		return -1;

	tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) +
	    ((tcp->th_flags & TH_SYN) ? 1 : 0) +
	    ((tcp->th_flags & TH_FIN) ? 1 : 0);

#ifdef USE_INET6
	hlen = (fin->fin_v == 6) ?
	    sizeof(ip6_t) : sizeof(ip_t);
#else
	hlen = sizeof(ip_t);
#endif
#ifdef MGETHDR
	MGETHDR(m, M_DONTWAIT, MT_HEADER);
#else
	MGET(m, M_DONTWAIT, MT_HEADER);
#endif
	if (m == NULL)
		return -1;
	if (sizeof(*tcp2) + hlen > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if (m == NULL)
			return -1;
		if ((m->m_flags & M_EXT) == 0) {
			FREE_MB_T(m);
			return -1;
		}
	}

	m->m_len = sizeof(*tcp2) + hlen;
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len;
	m_reset_rcvif(m);
	ip = mtod(m, struct ip *);
	bzero((char *)ip, hlen);
#ifdef USE_INET6
	ip6 = (ip6_t *)ip;
#endif
	bzero((char *)ip, sizeof(*tcp2) + hlen);
	tcp2 = (struct tcphdr *)((char *)ip + hlen);
	tcp2->th_sport = tcp->th_dport;
	tcp2->th_dport = tcp->th_sport;

	if (tcp->th_flags & TH_ACK) {
		tcp2->th_seq = tcp->th_ack;
		tcp2->th_flags = TH_RST;
		tcp2->th_ack = 0;
	} else {
		tcp2->th_seq = 0;
		tcp2->th_ack = ntohl(tcp->th_seq);
		tcp2->th_ack += tlen;
		tcp2->th_ack = htonl(tcp2->th_ack);
		tcp2->th_flags = TH_RST|TH_ACK;
	}
	tcp2->th_x2 = 0;
	TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2);
	tcp2->th_win = tcp->th_win;
	tcp2->th_sum = 0;
	tcp2->th_urp = 0;

#ifdef USE_INET6
	if (fin->fin_v == 6) {
		ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_hlim = 0;
		ip6->ip6_src = fin->fin_dst6.in6;
		ip6->ip6_dst = fin->fin_src6.in6;
		tcp2->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(*ip6), sizeof(*tcp2));
		return ipf_send_ip(fin, m);
	}
#endif
#ifdef INET
	ip->ip_p = IPPROTO_TCP;
	ip->ip_len = htons(sizeof(struct tcphdr));
	ip->ip_src.s_addr = fin->fin_daddr;
	ip->ip_dst.s_addr = fin->fin_saddr;
	tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2));
	ip->ip_len = hlen + sizeof(*tcp2);
	return ipf_send_ip(fin, m);
#else
	return 0;
#endif
}


/*
 * Expects ip_len to be in host byte order when called.
 */
static int
ipf_send_ip(fr_info_t *fin, mb_t *m)
{
	fr_info_t fnew;
#ifdef INET
	ip_t *oip;
#endif
	ip_t *ip;
	int hlen;

	ip = mtod(m, ip_t *);
	bzero((char *)&fnew, sizeof(fnew));
	fnew.fin_main_soft = fin->fin_main_soft;

	IP_V_A(ip, fin->fin_v);
	switch (fin->fin_v)
	{
#ifdef INET
	case 4 :
		oip = fin->fin_ip;
		hlen = sizeof(*oip);
		fnew.fin_v = 4;
		fnew.fin_p = ip->ip_p;
		fnew.fin_plen = ntohs(ip->ip_len);
		HTONS(ip->ip_len);
		IP_HL_A(ip, sizeof(*oip) >> 2);
		ip->ip_tos = oip->ip_tos;
		ip->ip_id = ipf_nextipid(fin);
		ip->ip_off = htons(ip_mtudisc ?
		    IP_DF : 0);
		ip->ip_ttl = ip_defttl;
		ip->ip_sum = 0;
		break;
#endif
#ifdef USE_INET6
	case 6 :
	{
		ip6_t *ip6 = (ip6_t *)ip;

		ip6->ip6_vfc = 0x60;
		ip6->ip6_hlim = IPDEFTTL;

		hlen = sizeof(*ip6);
		fnew.fin_p = ip6->ip6_nxt;
		fnew.fin_v = 6;
		fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen;
		break;
	}
#endif
	default :
		return EINVAL;
	}
#ifdef KAME_IPSEC
	m_reset_rcvif(m);
#endif

	fnew.fin_ifp = fin->fin_ifp;
	fnew.fin_flx = FI_NOCKSUM;
	fnew.fin_m = m;
	fnew.fin_ip = ip;
	fnew.fin_mp = &m;
	fnew.fin_hlen = hlen;
	fnew.fin_dp = (char *)ip + hlen;
	(void) ipf_makefrip(hlen, ip, &fnew);

	return ipf_fastroute(m, &m, &fnew, NULL);
}


int
ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
{
	int err, hlen, xtra, iclen, ohlen, avail;
	struct in_addr dst4;
	struct icmp *icmp;
	struct mbuf *m;
	i6addr_t dst6;
	void *ifp;
#ifdef USE_INET6
	int code;
	ip6_t *ip6;
#endif
	ip_t *ip, *ip2;

	if ((type < 0) || (type > ICMP_MAXTYPE))
		return -1;

#ifdef USE_INET6
	code = fin->fin_icode;
	if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
		return -1;
#endif

	if (ipf_checkl4sum(fin) == -1)
		return -1;
#ifdef MGETHDR
	MGETHDR(m, M_DONTWAIT, MT_HEADER);
#else
	MGET(m, M_DONTWAIT, MT_HEADER);
#endif
	if (m == NULL)
		return -1;
	avail = MHLEN;

	xtra = 0;
	hlen = 0;
	ohlen = 0;
	dst4.s_addr = 0;
	ifp = fin->fin_ifp;
	if (fin->fin_v == 4) {
		if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT))
			switch (ntohs(fin->fin_data[0]) >> 8)
			{
			case ICMP_ECHO :
			case ICMP_TSTAMP :
			case ICMP_IREQ :
			case ICMP_MASKREQ :
				break;
			default :
				FREE_MB_T(m);
				return 0;
			}

		if (dst == 0) {
			if (ipf_ifpaddr(&ipfmain, 4, FRI_NORMAL, ifp,
			    &dst6, NULL) == -1) {
				FREE_MB_T(m);
				return -1;
			}
			dst4 = dst6.in4;
		} else
			dst4.s_addr = fin->fin_daddr;

		hlen = sizeof(ip_t);
		ohlen = fin->fin_hlen;
		iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
		if (fin->fin_hlen < fin->fin_plen)
			xtra = MIN(fin->fin_dlen, 8);
		else
			xtra = 0;
	}

#ifdef USE_INET6
	else if (fin->fin_v == 6) {
		hlen = sizeof(ip6_t);
		ohlen = sizeof(ip6_t);
		iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen;
		type = icmptoicmp6types[type];
		if (type == ICMP6_DST_UNREACH)
			code = icmptoicmp6unreach[code];

		if (iclen + max_linkhdr + fin->fin_plen > avail) {
			MCLGET(m, M_DONTWAIT);
			if (m == NULL)
				return -1;
			if ((m->m_flags & M_EXT) == 0) {
				FREE_MB_T(m);
				return -1;
			}
			avail = MCLBYTES;
		}
		xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr);
		xtra = MIN(xtra, IPV6_MMTU - iclen);
		if (dst == 0 && !IN6_IS_ADDR_LINKLOCAL(&fin->fin_dst6.in6)) {
			if (ipf_ifpaddr(&ipfmain, 6, FRI_NORMAL, ifp,
			    &dst6, NULL) == -1) {
				FREE_MB_T(m);
				return -1;
			}
		} else
			dst6 = fin->fin_dst6;
	}
#endif
	else {
		FREE_MB_T(m);
		return -1;
	}

	avail -= (max_linkhdr + iclen);
	if (avail < 0) {
		FREE_MB_T(m);
		return -1;
	}
	if (xtra > avail)
		xtra = avail;
	iclen += xtra;
	m->m_data += max_linkhdr;
	m_reset_rcvif(m);
	m->m_pkthdr.len = iclen;
	m->m_len = iclen;
	ip = mtod(m, ip_t *);
	icmp = (struct icmp *)((char *)ip + hlen);
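	/*
	 * ip2 below points at the copy of the offending packet's IP header
	 * that is carried inside the ICMP error payload (icmp_ip).
	 */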
	ip2 = (ip_t *)&icmp->icmp_ip;

	icmp->icmp_type = type;
	icmp->icmp_code = fin->fin_icode;
	icmp->icmp_cksum = 0;
#ifdef icmp_nextmtu
	if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) {
		if (fin->fin_mtu != 0) {
			icmp->icmp_nextmtu = htons(fin->fin_mtu);

		} else if (ifp != NULL) {
			icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp));

		} else {	/* make up a number... */
			icmp->icmp_nextmtu = htons(fin->fin_plen - 20);
		}
	}
#endif

	bcopy((char *)fin->fin_ip, (char *)ip2, ohlen);

#if defined(M_CSUM_IPv4)
	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csuminfo = 0;
#endif /* __NetBSD__ && M_CSUM_IPv4 */

#ifdef USE_INET6
	ip6 = (ip6_t *)ip;
	if (fin->fin_v == 6) {
		ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow;
		ip6->ip6_plen = htons(iclen - hlen);
		ip6->ip6_nxt = IPPROTO_ICMPV6;
		ip6->ip6_hlim = 0;
		ip6->ip6_src = dst6.in6;
		ip6->ip6_dst = fin->fin_src6.in6;
		if (xtra > 0)
			bcopy((char *)fin->fin_ip + ohlen,
			    (char *)&icmp->icmp_ip + ohlen, xtra);
		icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6,
		    sizeof(*ip6), iclen - hlen);
	} else
#endif
	{
		ip->ip_p = IPPROTO_ICMP;
		ip->ip_src.s_addr = dst4.s_addr;
		ip->ip_dst.s_addr = fin->fin_saddr;

		if (xtra > 0)
			bcopy((char *)fin->fin_ip + ohlen,
			    (char *)&icmp->icmp_ip + ohlen, xtra);
		icmp->icmp_cksum = ipf_cksum((u_short *)icmp,
		    sizeof(*icmp) + 8);
		ip->ip_len = iclen;
		ip->ip_p = IPPROTO_ICMP;
	}
	err = ipf_send_ip(fin, m);
	return err;
}


/*
 * m0 - pointer to mbuf where the IP packet starts
 * mpp - pointer to the mbuf pointer that is the start of the mbuf chain
 */
int
ipf_fastroute(mb_t *m0, mb_t **mpp, fr_info_t *fin, frdest_t *fdp)
{
	register struct ip *ip, *mhip;
	register struct mbuf *m = *mpp;
	register struct route *ro;
	int len, off, error = 0, hlen, code;
	struct ifnet *ifp, *sifp;
	ipf_main_softc_t *softc;
#if __NetBSD_Version__ >= 499001100
	union {
		struct sockaddr		dst;
		struct sockaddr_in	dst4;
	} u;
#else
	struct sockaddr_in *dst4;
#endif
	struct sockaddr *dst;
	u_short ip_off, ip_len;
	struct route iproute;
	struct rtentry *rt;
	frdest_t node;
	frentry_t *fr;

	if (fin->fin_v == 6) {
#ifdef USE_INET6
		error = ipf_fastroute6(m0, mpp, fin, fdp);
#else
		error = EPROTONOSUPPORT;
#endif
		if ((error != 0) && (*mpp != NULL))
			FREE_MB_T(*mpp);
		return error;
	}
#ifndef INET
	FREE_MB_T(*mpp);
	return EPROTONOSUPPORT;
#else

	hlen = fin->fin_hlen;
	ip = mtod(m0, struct ip *);
	softc = fin->fin_main_soft;
	rt = NULL;
	ifp = NULL;

# if defined(M_CSUM_IPv4)
	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m0->m_pkthdr.csuminfo = 0;
# endif /* __NetBSD__ && M_CSUM_IPv4 */

	/*
	 * Route packet.
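	 * The lookup is done against a private route cache (iproute) and the
	 * result is handed straight to the interface output routine below,
	 * so fast-routed packets bypass the normal ip_output() path.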
1145 */ 1146 ro = &iproute; 1147 memset(ro, 0, sizeof(*ro)); 1148 fr = fin->fin_fr; 1149 1150 if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) && 1151 (fdp->fd_type == FRD_DSTLIST)) { 1152 if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0) 1153 fdp = &node; 1154 } 1155 if (fdp != NULL) 1156 ifp = fdp->fd_ptr; 1157 else 1158 ifp = fin->fin_ifp; 1159 1160 if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) { 1161 error = -2; 1162 goto bad; 1163 } 1164 1165 # if __NetBSD_Version__ >= 499001100 1166 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0)) 1167 sockaddr_in_init(&u.dst4, &fdp->fd_ip, 0); 1168 else 1169 sockaddr_in_init(&u.dst4, &ip->ip_dst, 0); 1170 dst = &u.dst; 1171 rtcache_setdst(ro, dst); 1172 rt = rtcache_init(ro); 1173 # else 1174 dst4 = (struct sockaddr_in *)&ro->ro_dst; 1175 dst = (struct sockaddr *)dst4; 1176 dst4->sin_family = AF_INET; 1177 dst4->sin_addr = ip->ip_dst; 1178 1179 if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0)) 1180 dst4->sin_addr = fdp->fd_ip; 1181 1182 dst4->sin_len = sizeof(*dst); 1183 rtalloc(ro); 1184 rt = ro->ro_rt; 1185 # endif 1186 if ((ifp == NULL) && (rt != NULL)) 1187 ifp = rt->rt_ifp; 1188 if ((rt == NULL) || (ifp == NULL)) { 1189 #ifdef INET 1190 if (in_localaddr(ip->ip_dst)) 1191 error = EHOSTUNREACH; 1192 else 1193 #endif 1194 error = ENETUNREACH; 1195 goto bad; 1196 } 1197 1198 1199 if (rt->rt_flags & RTF_GATEWAY) 1200 dst = rt->rt_gateway; 1201 1202 rt->rt_use++; 1203 1204 /* 1205 * For input packets which are being "fastrouted", they won't 1206 * go back through output filtering and miss their chance to get 1207 * NAT'd and counted. Duplicated packets aren't considered to be 1208 * part of the normal packet stream, so do not NAT them or pass 1209 * them through stateful checking, etc. 1210 */ 1211 if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) { 1212 sifp = fin->fin_ifp; 1213 fin->fin_ifp = ifp; 1214 fin->fin_out = 1; 1215 (void) ipf_acctpkt(fin, NULL); 1216 fin->fin_fr = NULL; 1217 if (!fr || !(fr->fr_flags & FR_RETMASK)) { 1218 u_32_t pass; 1219 1220 (void) ipf_state_check(fin, &pass); 1221 } 1222 1223 switch (ipf_nat_checkout(fin, NULL)) 1224 { 1225 case 0 : 1226 break; 1227 case 1 : 1228 ip->ip_sum = 0; 1229 break; 1230 case -1 : 1231 error = -1; 1232 goto bad; 1233 break; 1234 } 1235 1236 fin->fin_ifp = sifp; 1237 fin->fin_out = 0; 1238 } else 1239 ip->ip_sum = 0; 1240 /* 1241 * If small enough for interface, can just send directly. 1242 */ 1243 m_set_rcvif(m, ifp); 1244 1245 ip_len = ntohs(ip->ip_len); 1246 if (ip_len <= ifp->if_mtu) { 1247 # if defined(M_CSUM_IPv4) 1248 # if (__NetBSD_Version__ >= 105009999) 1249 if (ifp->if_csum_flags_tx & M_CSUM_IPv4) 1250 m->m_pkthdr.csuminfo |= M_CSUM_IPv4; 1251 # else 1252 if (ifp->if_capabilities & IFCAP_CSUM_IPv4) 1253 m->m_pkthdr.csuminfo |= M_CSUM_IPv4; 1254 # endif /* (__NetBSD_Version__ >= 105009999) */ 1255 else if (ip->ip_sum == 0) 1256 ip->ip_sum = in_cksum(m, hlen); 1257 # else 1258 if (!ip->ip_sum) 1259 ip->ip_sum = in_cksum(m, hlen); 1260 # endif /* M_CSUM_IPv4 */ 1261 1262 error = if_output_lock(ifp, ifp, m, dst, rt); 1263 goto done; 1264 } 1265 1266 /* 1267 * Too large for interface; fragment if possible. 1268 * Must be able to put at least 8 bytes per fragment. 
1269 */ 1270 ip_off = ntohs(ip->ip_off); 1271 if (ip_off & IP_DF) { 1272 error = EMSGSIZE; 1273 goto bad; 1274 } 1275 len = (ifp->if_mtu - hlen) &~ 7; 1276 if (len < 8) { 1277 error = EMSGSIZE; 1278 goto bad; 1279 } 1280 1281 { 1282 int mhlen, firstlen = len; 1283 struct mbuf **mnext = &m->m_act; 1284 1285 /* 1286 * Loop through length of segment after first fragment, 1287 * make new header and copy data of each part and link onto chain. 1288 */ 1289 m0 = m; 1290 mhlen = sizeof (struct ip); 1291 for (off = hlen + len; off < ip_len; off += len) { 1292 # ifdef MGETHDR 1293 MGETHDR(m, M_DONTWAIT, MT_HEADER); 1294 # else 1295 MGET(m, M_DONTWAIT, MT_HEADER); 1296 # endif 1297 if (m == 0) { 1298 m = m0; 1299 error = ENOBUFS; 1300 goto bad; 1301 } 1302 m->m_data += max_linkhdr; 1303 mhip = mtod(m, struct ip *); 1304 bcopy((char *)ip, (char *)mhip, sizeof(*ip)); 1305 #ifdef INET 1306 if (hlen > sizeof (struct ip)) { 1307 mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip); 1308 IP_HL_A(mhip, mhlen >> 2); 1309 } 1310 #endif 1311 m->m_len = mhlen; 1312 mhip->ip_off = ((off - hlen) >> 3) + ip_off; 1313 if (off + len >= ip_len) 1314 len = ip_len - off; 1315 else 1316 mhip->ip_off |= IP_MF; 1317 mhip->ip_len = htons((u_short)(len + mhlen)); 1318 m->m_next = m_copym(m0, off, len, M_DONTWAIT); 1319 if (m->m_next == 0) { 1320 error = ENOBUFS; /* ??? */ 1321 goto sendorfree; 1322 } 1323 m->m_pkthdr.len = mhlen + len; 1324 m_reset_rcvif(m); 1325 mhip->ip_off = htons((u_short)mhip->ip_off); 1326 mhip->ip_sum = 0; 1327 #ifdef INET 1328 mhip->ip_sum = in_cksum(m, mhlen); 1329 #endif 1330 *mnext = m; 1331 mnext = &m->m_act; 1332 } 1333 /* 1334 * Update first fragment by trimming what's been copied out 1335 * and updating header, then send each fragment (in order). 1336 */ 1337 m_adj(m0, hlen + firstlen - ip_len); 1338 ip->ip_len = htons((u_short)(hlen + firstlen)); 1339 ip->ip_off = htons((u_short)IP_MF); 1340 ip->ip_sum = 0; 1341 #ifdef INET 1342 ip->ip_sum = in_cksum(m0, hlen); 1343 #endif 1344 sendorfree: 1345 for (m = m0; m; m = m0) { 1346 m0 = m->m_act; 1347 m->m_act = 0; 1348 if (error == 0) { 1349 KERNEL_LOCK(1, NULL); 1350 error = (*ifp->if_output)(ifp, m, dst, rt); 1351 KERNEL_UNLOCK_ONE(NULL); 1352 } else { 1353 FREE_MB_T(m); 1354 } 1355 } 1356 } 1357 done: 1358 if (!error) 1359 softc->ipf_frouteok[0]++; 1360 else 1361 softc->ipf_frouteok[1]++; 1362 1363 # if __NetBSD_Version__ >= 499001100 1364 rtcache_unref(rt, ro); 1365 rtcache_free(ro); 1366 # else 1367 if (rt) { 1368 RTFREE(rt); 1369 } 1370 # endif 1371 return error; 1372 bad: 1373 if (error == EMSGSIZE) { 1374 sifp = fin->fin_ifp; 1375 code = fin->fin_icode; 1376 fin->fin_icode = ICMP_UNREACH_NEEDFRAG; 1377 fin->fin_ifp = ifp; 1378 (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1); 1379 fin->fin_ifp = sifp; 1380 fin->fin_icode = code; 1381 } 1382 FREE_MB_T(m); 1383 goto done; 1384 #endif /* INET */ 1385 } 1386 1387 1388 #if defined(USE_INET6) 1389 /* 1390 * This is the IPv6 specific fastroute code. It doesn't clean up the mbuf's 1391 * or ensure that it is an IPv6 packet that is being forwarded, those are 1392 * expected to be done by the called (ipf_fastroute). 
1393 */ 1394 static int 1395 ipf_fastroute6(struct mbuf *m0, struct mbuf **mpp, fr_info_t *fin, 1396 frdest_t *fdp) 1397 { 1398 # if __NetBSD_Version__ >= 499001100 1399 struct route ip6route; 1400 const struct sockaddr *dst; 1401 union { 1402 struct sockaddr dst; 1403 struct sockaddr_in6 dst6; 1404 } u; 1405 struct route *ro; 1406 # else 1407 struct route_in6 ip6route; 1408 struct sockaddr_in6 *dst6; 1409 struct route_in6 *ro; 1410 # endif 1411 struct rtentry *rt; 1412 struct ifnet *ifp; 1413 u_long mtu; 1414 int error; 1415 1416 error = 0; 1417 ro = &ip6route; 1418 1419 if (fdp != NULL) 1420 ifp = fdp->fd_ptr; 1421 else 1422 ifp = fin->fin_ifp; 1423 memset(ro, 0, sizeof(*ro)); 1424 # if __NetBSD_Version__ >= 499001100 1425 if (fdp != NULL && IP6_NOTZERO(&fdp->fd_ip6)) 1426 sockaddr_in6_init(&u.dst6, &fdp->fd_ip6.in6, 0, 0, 0); 1427 else 1428 sockaddr_in6_init(&u.dst6, &fin->fin_fi.fi_dst.in6, 0, 0, 0); 1429 if ((error = in6_setscope(&u.dst6.sin6_addr, ifp, 1430 &u.dst6.sin6_scope_id)) != 0) 1431 return error; 1432 if ((error = sa6_embedscope(&u.dst6, 0)) != 0) 1433 return error; 1434 1435 dst = &u.dst; 1436 rtcache_setdst(ro, dst); 1437 1438 rt = rtcache_init(ro); 1439 if ((ifp == NULL) && (rt != NULL)) 1440 ifp = rt->rt_ifp; 1441 # else 1442 dst6 = (struct sockaddr_in6 *)&ro->ro_dst; 1443 dst6->sin6_family = AF_INET6; 1444 dst6->sin6_len = sizeof(struct sockaddr_in6); 1445 dst6->sin6_addr = fin->fin_fi.fi_dst.in6; 1446 /* KAME */ 1447 if (IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) 1448 dst6->sin6_addr.s6_addr16[1] = htons(ifp->if_index); 1449 1450 if (fdp != NULL) { 1451 if (IP6_NOTZERO(&fdp->fd_ip6)) 1452 dst6->sin6_addr = fdp->fd_ip6.in6; 1453 } 1454 1455 rtalloc((struct route *)ro); 1456 1457 if ((ifp == NULL) && (ro->ro_rt != NULL)) 1458 ifp = ro->ro_rt->rt_ifp; 1459 rt = ro->ro_rt; 1460 # endif 1461 if ((rt == NULL) || (ifp == NULL)) { 1462 1463 error = EHOSTUNREACH; 1464 goto bad; 1465 } 1466 1467 { 1468 # if (__NetBSD_Version__ >= 106010000) && !defined(IN6_LINKMTU) \ 1469 && defined(IPV6CTL_ACCEPT_RTADV) 1470 struct in6_ifextra *ife; 1471 # endif 1472 if (rt->rt_flags & RTF_GATEWAY) 1473 # if __NetBSD_Version__ >= 499001100 1474 dst = rt->rt_gateway; 1475 # else 1476 dst6 = (struct sockaddr_in6 *)rt->rt_gateway; 1477 # endif 1478 rt->rt_use++; 1479 1480 /* Determine path MTU. 
	 */
# if (__NetBSD_Version__ <= 106009999)
	mtu = nd_ifinfo[ifp->if_index].linkmtu;
# elif defined(IPV6CTL_ACCEPT_RTADV)
#  ifdef IN6_LINKMTU
	mtu = IN6_LINKMTU(ifp);
#  else
	ife = (struct in6_ifextra *)(ifp)->if_afdata[AF_INET6];
	mtu = ife->nd_ifinfo[ifp->if_index].linkmtu;
#  endif
# else
	mtu = ifp->if_mtu;
# endif
	if ((error == 0) && (m0->m_pkthdr.len <= mtu)) {
# if __NetBSD_Version__ >= 499001100
		error = ip6_if_output(ifp, ifp, m0, satocsin6(dst), rt);
# else
		error = nd6_output(ifp, ifp, m0, dst6, rt);
# endif
		if (error)
			*mpp = NULL;	/* m0 has been freed */
	} else {
		error = EMSGSIZE;
	}
    }
bad:
# if __NetBSD_Version__ >= 499001100
	rtcache_unref(rt, ro);
	rtcache_free(ro);
# else
	if (ro->ro_rt != NULL) {
		RTFREE(((struct route *)ro)->ro_rt);
	}
# endif
	return error;
}
#endif /* INET6 */


int
ipf_verifysrc(fr_info_t *fin)
{
#if __NetBSD_Version__ >= 499001100
	union {
		struct sockaddr		dst;
		struct sockaddr_in	dst4;
	} u;
	struct rtentry *rt;
#else
	struct sockaddr_in *dst;
#endif
	struct route iproute;
	int rc;

#if __NetBSD_Version__ >= 499001100
	sockaddr_in_init(&u.dst4, &fin->fin_src, 0);
	rtcache_setdst(&iproute, &u.dst);
	rt = rtcache_init(&iproute);
	if (rt == NULL)
		rc = 0;
	else
		rc = (fin->fin_ifp == rt->rt_ifp);
	rtcache_unref(rt, &iproute);
	rtcache_free(&iproute);
#else
	dst = (struct sockaddr_in *)&iproute.ro_dst;
	dst->sin_len = sizeof(*dst);
	dst->sin_family = AF_INET;
	dst->sin_addr = fin->fin_src;
	rtalloc(&iproute);
	if (iproute.ro_rt == NULL)
		return 0;
	rc = (fin->fin_ifp == iproute.ro_rt->rt_ifp);
	RTFREE(iproute.ro_rt);
#endif
	return rc;
}


/*
 * return the first IP Address associated with an interface
 */
int
ipf_ifpaddr(ipf_main_softc_t *softc, int v, int atype, void *ifptr,
    i6addr_t *inp, i6addr_t *inpmask)
{
#ifdef USE_INET6
	struct in6_addr *inp6 = NULL;
#endif
	struct sockaddr *sock, *mask;
	struct sockaddr_in *sin;
	struct ifaddr *ifa;
	struct ifnet *ifp;

	if ((ifptr == NULL) || (ifptr == (void *)-1))
		return -1;

	ifp = ifptr;
	mask = NULL;

	if (v == 4)
		inp->in4.s_addr = 0;
#ifdef USE_INET6
	else if (v == 6)
		bzero((char *)inp, sizeof(*inp));
#endif

	ifa = IFADDR_READER_FIRST(ifp);
	sock = ifa ?
	    ifa->ifa_addr : NULL;
	while (sock != NULL && ifa != NULL) {
		sin = (struct sockaddr_in *)sock;
		if ((v == 4) && (sin->sin_family == AF_INET))
			break;
#ifdef USE_INET6
		if ((v == 6) && (sin->sin_family == AF_INET6)) {
			inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr;
			if (!IN6_IS_ADDR_LINKLOCAL(inp6) &&
			    !IN6_IS_ADDR_LOOPBACK(inp6))
				break;
		}
#endif
		ifa = IFADDR_READER_NEXT(ifa);
		if (ifa != NULL)
			sock = ifa->ifa_addr;
	}
	if (ifa == NULL || sock == NULL)
		return -1;

	mask = ifa->ifa_netmask;
	if (atype == FRI_BROADCAST)
		sock = ifa->ifa_broadaddr;
	else if (atype == FRI_PEERADDR)
		sock = ifa->ifa_dstaddr;

#ifdef USE_INET6
	if (v == 6)
		return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock,
		    (struct sockaddr_in6 *)mask,
		    inp, inpmask);
#endif
	return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock,
	    (struct sockaddr_in *)mask,
	    &inp->in4, &inpmask->in4);
}


u_32_t
ipf_newisn(fr_info_t *fin)
{
#if __NetBSD_Version__ >= 105190000	/* 1.5T */
	size_t asz;

	if (fin->fin_v == 4)
		asz = sizeof(struct in_addr);
	else if (fin->fin_v == 6)
		asz = sizeof(fin->fin_src);
	else	/* XXX: no way to return error */
		return 0;
#ifdef INET
	return tcp_new_iss1((void *)&fin->fin_src, (void *)&fin->fin_dst,
	    fin->fin_sport, fin->fin_dport, asz);
#else
	return ENOSYS;
#endif
#else
	static int iss_seq_off = 0;
	u_char hash[16];
	u_32_t newiss;
	MD5_CTX ctx;

	/*
	 * Compute the base value of the ISS. It is a hash
	 * of (saddr, sport, daddr, dport, secret).
	 */
	MD5Init(&ctx);

	MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_src,
	    sizeof(fin->fin_fi.fi_src));
	MD5Update(&ctx, (u_char *) &fin->fin_fi.fi_dst,
	    sizeof(fin->fin_fi.fi_dst));
	MD5Update(&ctx, (u_char *) &fin->fin_dat, sizeof(fin->fin_dat));

	MD5Update(&ctx, ipf_iss_secret, sizeof(ipf_iss_secret));

	MD5Final(hash, &ctx);

	memcpy(&newiss, hash, sizeof(newiss));

	/*
	 * Now increment our "timer", and add it in to
	 * the computed value.
	 *
	 * XXX Use `addin'?
	 * XXX TCP_ISSINCR too large to use?
	 */
	iss_seq_off += 0x00010000;
	newiss += iss_seq_off;
	return newiss;
#endif
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_nextipid                                                 */
/* Returns:     int - 0 == success, -1 == error (packet should be dropped)   */
/* Parameters:  fin(I) - pointer to packet information                       */
/*                                                                           */
/* Returns the next IPv4 ID to use for this packet.                          */
/* ------------------------------------------------------------------------ */
u_short
ipf_nextipid(fr_info_t *fin)
{
#ifdef USE_MUTEXES
	ipf_main_softc_t *softc = fin->fin_main_soft;
#endif
	u_short id;

	MUTEX_ENTER(&softc->ipf_rw);
	id = ipid++;
	MUTEX_EXIT(&softc->ipf_rw);

	return id;
}


EXTERN_INLINE int
ipf_checkv4sum(fr_info_t *fin)
{
#ifdef M_CSUM_TCP_UDP_BAD
	int manual, pflag, cflags, active;
	mb_t *m;

	if ((fin->fin_flx & FI_NOCKSUM) != 0)
		return 0;

	if ((fin->fin_flx & FI_SHORT) != 0)
		return 1;

	if (fin->fin_cksum != FI_CK_NEEDED)
		return (fin->fin_cksum > FI_CK_NEEDED) ?
		    0 : -1;

	manual = 0;
	m = fin->fin_m;
	if (m == NULL) {
		manual = 1;
		goto skipauto;
	}

	switch (fin->fin_p)
	{
	case IPPROTO_UDP :
		pflag = M_CSUM_UDPv4;
		break;
	case IPPROTO_TCP :
		pflag = M_CSUM_TCPv4;
		break;
	default :
		pflag = 0;
		manual = 1;
		break;
	}

	active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
	active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
	cflags = m->m_pkthdr.csum_flags & active;

	if (pflag != 0) {
		if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
			fin->fin_flx |= FI_BAD;
			fin->fin_cksum = FI_CK_BAD;
		} else if (cflags == (pflag | M_CSUM_DATA)) {
			if ((m->m_pkthdr.csum_data ^ 0xffff) != 0) {
				fin->fin_flx |= FI_BAD;
				fin->fin_cksum = FI_CK_BAD;
			} else {
				fin->fin_cksum = FI_CK_SUMOK;
			}
		} else if (cflags == pflag) {
			fin->fin_cksum = FI_CK_SUMOK;
		} else {
			manual = 1;
		}
	}
skipauto:
	if (manual != 0) {
		if (ipf_checkl4sum(fin) == -1) {
			fin->fin_flx |= FI_BAD;
			return -1;
		}
	}
#else
	if (ipf_checkl4sum(fin) == -1) {
		fin->fin_flx |= FI_BAD;
		return -1;
	}
#endif
	return 0;
}


#ifdef USE_INET6
EXTERN_INLINE int
ipf_checkv6sum(fr_info_t *fin)
{
# ifdef M_CSUM_TCP_UDP_BAD
	int manual, pflag, cflags, active;
	mb_t *m;

	if ((fin->fin_flx & FI_NOCKSUM) != 0)
		return 0;

	if ((fin->fin_flx & FI_SHORT) != 0)
		return 1;

	if (fin->fin_cksum != FI_CK_SUMOK)
		return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1;


	manual = 0;
	m = fin->fin_m;

	switch (fin->fin_p)
	{
	case IPPROTO_UDP :
		pflag = M_CSUM_UDPv6;
		break;
	case IPPROTO_TCP :
		pflag = M_CSUM_TCPv6;
		break;
	default :
		pflag = 0;
		manual = 1;
		break;
	}

	active = ((struct ifnet *)fin->fin_ifp)->if_csum_flags_rx & pflag;
	active |= M_CSUM_TCP_UDP_BAD | M_CSUM_DATA;
	cflags = m->m_pkthdr.csum_flags & active;

	if (pflag != 0) {
		if (cflags == (pflag | M_CSUM_TCP_UDP_BAD)) {
			fin->fin_flx |= FI_BAD;
		} else if (cflags == (pflag | M_CSUM_DATA)) {
			if ((m->m_pkthdr.csum_data ^ 0xffff) != 0)
				fin->fin_flx |= FI_BAD;
		} else if (cflags == pflag) {
			;
		} else {
			manual = 1;
		}
	}
	if (manual != 0) {
		if (ipf_checkl4sum(fin) == -1) {
			fin->fin_flx |= FI_BAD;
			return -1;
		}
	}
# else
	if (ipf_checkl4sum(fin) == -1) {
		fin->fin_flx |= FI_BAD;
		return -1;
	}
# endif
	return 0;
}
#endif /* USE_INET6 */


size_t
mbufchainlen(struct mbuf *m0)
{
	size_t len;

	if ((m0->m_flags & M_PKTHDR) != 0) {
		len = m0->m_pkthdr.len;
	} else {
		struct mbuf *m;

		for (m = m0, len = 0; m != NULL; m = m->m_next)
			len += m->m_len;
	}
	return len;
}


/* ------------------------------------------------------------------------ */
/* Function:    ipf_pullup                                                   */
/* Returns:     NULL == pullup failed, else pointer to protocol header       */
/* Parameters:  xmin(I) - pointer to buffer where data packet starts         */
/*              fin(I)  - pointer to packet information                      */
/*              len(I)  - number of bytes to pullup                          */
/*                                                                           */
/* Attempt to move at least len bytes (from the start of the buffer) into a  */
/* single buffer for ease of access.
   Operating system native functions are  */
/* used to manage buffers - if necessary. If the entire packet ends up in    */
/* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has  */
/* not been called. Both fin_ip and fin_dp are updated before exiting _IF_   */
/* and ONLY if the pullup succeeds.                                          */
/*                                                                           */
/* We assume that 'xmin' is a pointer to a buffer that is part of the chain  */
/* of buffers that starts at *fin->fin_mp.                                   */
/* ------------------------------------------------------------------------ */
void *
ipf_pullup(mb_t *xmin, fr_info_t *fin, int len)
{
	int dpoff, ipoff;
	mb_t *m = xmin;
	char *ip;

	if (m == NULL)
		return NULL;

	ip = (char *)fin->fin_ip;
	if ((fin->fin_flx & FI_COALESCE) != 0)
		return ip;

	ipoff = fin->fin_ipoff;
	if (fin->fin_dp != NULL)
		dpoff = (char *)fin->fin_dp - (char *)ip;
	else
		dpoff = 0;

	if (M_LEN(m) < len) {
		mb_t *n = *fin->fin_mp;
		/*
		 * Assume that M_PKTHDR is set and just work with what is left
		 * rather than check..
		 * Should not make any real difference, anyway.
		 */
		if (m != n) {
			/*
			 * Record the mbuf that points to the mbuf that we're
			 * about to go to work on so that we can update the
			 * m_next appropriately later.
			 */
			for (; n->m_next != m; n = n->m_next)
				;
		} else {
			n = NULL;
		}

#ifdef MHLEN
		if (len > MHLEN)
#else
		if (len > MLEN)
#endif
		{
#ifdef HAVE_M_PULLDOWN
			if (m_pulldown(m, 0, len, NULL) == NULL)
				m = NULL;
#else
			FREE_MB_T(*fin->fin_mp);
			m = NULL;
			n = NULL;
#endif
		} else
		{
			m = m_pullup(m, len);
		}
		if (n != NULL)
			n->m_next = m;
		if (m == NULL) {
			/*
			 * When n is non-NULL, it indicates that m pointed to
			 * a sub-chain (tail) of the mbuf and that the head
			 * of this chain has not yet been free'd.
1946 */ 1947 if (n != NULL) { 1948 FREE_MB_T(*fin->fin_mp); 1949 } 1950 1951 *fin->fin_mp = NULL; 1952 fin->fin_m = NULL; 1953 return NULL; 1954 } 1955 1956 if (n == NULL) 1957 *fin->fin_mp = m; 1958 1959 while (M_LEN(m) == 0) { 1960 m = m->m_next; 1961 } 1962 fin->fin_m = m; 1963 ip = MTOD(m, char *) + ipoff; 1964 1965 fin->fin_ip = (ip_t *)ip; 1966 if (fin->fin_dp != NULL) 1967 fin->fin_dp = (char *)fin->fin_ip + dpoff; 1968 if (fin->fin_fraghdr != NULL) 1969 fin->fin_fraghdr = (char *)ip + 1970 ((char *)fin->fin_fraghdr - 1971 (char *)fin->fin_ip); 1972 } 1973 1974 if (len == fin->fin_plen) 1975 fin->fin_flx |= FI_COALESCE; 1976 return ip; 1977 } 1978 1979 1980 int 1981 ipf_inject(fr_info_t *fin, mb_t *m) 1982 { 1983 int error; 1984 1985 if (fin->fin_out == 0) { 1986 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) { 1987 FREE_MB_T(m); 1988 error = ENOBUFS; 1989 } else { 1990 error = 0; 1991 } 1992 } else { 1993 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL); 1994 } 1995 return error; 1996 } 1997 1998 1999 u_32_t 2000 ipf_random(void) 2001 { 2002 int number; 2003 2004 #ifdef _CPRNG_H 2005 number = cprng_fast32(); 2006 #else 2007 number = arc4random(); 2008 #endif 2009 return number; 2010 } 2011 2012 2013 /* 2014 * routines below for saving IP headers to buffer 2015 */ 2016 static int ipfopen(dev_t dev, int flags 2017 #if (NetBSD >= 199511) 2018 , int devtype, PROC_T *p 2019 #endif 2020 ) 2021 { 2022 u_int unit = GET_MINOR(dev); 2023 int error; 2024 2025 if (IPL_LOGMAX < unit) { 2026 error = ENXIO; 2027 } else { 2028 switch (unit) 2029 { 2030 case IPL_LOGIPF : 2031 case IPL_LOGNAT : 2032 case IPL_LOGSTATE : 2033 case IPL_LOGAUTH : 2034 case IPL_LOGLOOKUP : 2035 case IPL_LOGSYNC : 2036 #ifdef IPFILTER_SCAN 2037 case IPL_LOGSCAN : 2038 #endif 2039 error = 0; 2040 break; 2041 default : 2042 error = ENXIO; 2043 break; 2044 } 2045 } 2046 #if (__NetBSD_Version__ >= 799003000) 2047 if (error == 0) { 2048 mutex_enter(&ipf_ref_mutex); 2049 ipf_active = 1; 2050 mutex_exit(&ipf_ref_mutex); 2051 } 2052 #endif 2053 return error; 2054 } 2055 2056 2057 static int ipfclose(dev_t dev, int flags 2058 #if (NetBSD >= 199511) 2059 , int devtype, PROC_T *p 2060 #endif 2061 ) 2062 { 2063 u_int unit = GET_MINOR(dev); 2064 2065 if (IPL_LOGMAX < unit) 2066 return ENXIO; 2067 else { 2068 #if (__NetBSD_Version__ >= 799003000) 2069 mutex_enter(&ipf_ref_mutex); 2070 ipf_active = 0; 2071 mutex_exit(&ipf_ref_mutex); 2072 #endif 2073 return 0; 2074 } 2075 } 2076 2077 /* 2078 * ipfread/ipflog 2079 * both of these must operate with at least splnet() lest they be 2080 * called during packet processing and cause an inconsistancy to appear in 2081 * the filter lists. 2082 */ 2083 static int ipfread(dev_t dev, struct uio *uio, int ioflag) 2084 { 2085 2086 if (ipfmain.ipf_running < 1) { 2087 ipfmain.ipf_interror = 130006; 2088 return EIO; 2089 } 2090 2091 if (GET_MINOR(dev) == IPL_LOGSYNC) 2092 return ipf_sync_read(&ipfmain, uio); 2093 2094 #ifdef IPFILTER_LOG 2095 return ipf_log_read(&ipfmain, GET_MINOR(dev), uio); 2096 #else 2097 ipfmain.ipf_interror = 130007; 2098 return ENXIO; 2099 #endif 2100 } 2101 2102 2103 /* 2104 * ipfwrite 2105 * both of these must operate with at least splnet() lest they be 2106 * called during packet processing and cause an inconsistancy to appear in 2107 * the filter lists. 
2108 */ 2109 static int ipfwrite(dev_t dev, struct uio *uio, int ioflag) 2110 { 2111 2112 if (ipfmain.ipf_running < 1) { 2113 ipfmain.ipf_interror = 130008; 2114 return EIO; 2115 } 2116 2117 if (GET_MINOR(dev) == IPL_LOGSYNC) 2118 return ipf_sync_write(&ipfmain, uio); 2119 ipfmain.ipf_interror = 130009; 2120 return ENXIO; 2121 } 2122 2123 2124 static int ipfpoll(dev_t dev, int events, PROC_T *p) 2125 { 2126 u_int unit = GET_MINOR(dev); 2127 int revents = 0; 2128 2129 if (IPL_LOGMAX < unit) { 2130 ipfmain.ipf_interror = 130010; 2131 return ENXIO; 2132 } 2133 2134 switch (unit) 2135 { 2136 case IPL_LOGIPF : 2137 case IPL_LOGNAT : 2138 case IPL_LOGSTATE : 2139 #ifdef IPFILTER_LOG 2140 if ((events & (POLLIN | POLLRDNORM)) && 2141 ipf_log_canread(&ipfmain, unit)) 2142 revents |= events & (POLLIN | POLLRDNORM); 2143 #endif 2144 break; 2145 case IPL_LOGAUTH : 2146 if ((events & (POLLIN | POLLRDNORM)) && 2147 ipf_auth_waiting(&ipfmain)) 2148 revents |= events & (POLLIN | POLLRDNORM); 2149 break; 2150 case IPL_LOGSYNC : 2151 if ((events & (POLLIN | POLLRDNORM)) && 2152 ipf_sync_canread(&ipfmain)) 2153 revents |= events & (POLLIN | POLLRDNORM); 2154 if ((events & (POLLOUT | POLLWRNORM)) && 2155 ipf_sync_canwrite(&ipfmain)) 2156 revents |= events & (POLLOUT | POLLWRNORM); 2157 break; 2158 case IPL_LOGSCAN : 2159 case IPL_LOGLOOKUP : 2160 default : 2161 break; 2162 } 2163 2164 if ((revents == 0) && (((events & (POLLIN|POLLRDNORM)) != 0))) 2165 selrecord(p, &ipfmain.ipf_selwait[unit]); 2166 return revents; 2167 } 2168 2169 u_int 2170 ipf_pcksum(fr_info_t *fin, int hlen, u_int sum) 2171 { 2172 struct mbuf *m; 2173 u_int sum2; 2174 int off; 2175 2176 m = fin->fin_m; 2177 off = (char *)fin->fin_dp - (char *)fin->fin_ip; 2178 m->m_data += hlen; 2179 m->m_len -= hlen; 2180 sum2 = in_cksum(fin->fin_m, fin->fin_plen - off); 2181 m->m_len += hlen; 2182 m->m_data -= hlen; 2183 2184 /* 2185 * Both sum and sum2 are partial sums, so combine them together. 2186 */ 2187 sum += ~sum2 & 0xffff; 2188 while (sum > 0xffff) 2189 sum = (sum & 0xffff) + (sum >> 16); 2190 sum2 = ~sum & 0xffff; 2191 return sum2; 2192 } 2193 2194 #if (__NetBSD_Version__ >= 799003000) 2195 2196 /* NetBSD module interface */ 2197 2198 MODULE(MODULE_CLASS_DRIVER, ipl, "bpf_filter"); 2199 2200 static int ipl_init(void *); 2201 static int ipl_fini(void *); 2202 static int ipl_modcmd(modcmd_t, void *); 2203 2204 #ifdef _MODULE 2205 static devmajor_t ipl_cmaj = -1, ipl_bmaj = -1; 2206 #endif 2207 2208 static int 2209 ipl_modcmd(modcmd_t cmd, void *opaque) 2210 { 2211 2212 switch (cmd) { 2213 case MODULE_CMD_INIT: 2214 return ipl_init(opaque); 2215 case MODULE_CMD_FINI: 2216 return ipl_fini(opaque); 2217 default: 2218 return ENOTTY; 2219 } 2220 } 2221 2222 static int 2223 ipl_init(void *opaque) 2224 { 2225 int error; 2226 2227 ipf_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK, 2228 ipf_listener_cb, NULL); 2229 2230 if ((error = ipf_load_all()) != 0) 2231 return error; 2232 2233 if (ipf_create_all(&ipfmain) == NULL) { 2234 ipf_unload_all(); 2235 return ENODEV; 2236 } 2237 2238 /* Initialize our mutex and reference count */ 2239 mutex_init(&ipf_ref_mutex, MUTEX_DEFAULT, IPL_NONE); 2240 ipf_active = 0; 2241 2242 #ifdef _MODULE 2243 /* 2244 * Insert ourself into the cdevsw list. 
2245 */ 2246 error = devsw_attach("ipl", NULL, &ipl_bmaj, &ipl_cdevsw, &ipl_cmaj); 2247 if (error) 2248 ipl_fini(opaque); 2249 #endif 2250 2251 return error; 2252 } 2253 2254 static int 2255 ipl_fini(void *opaque) 2256 { 2257 2258 #ifdef _MODULE 2259 devsw_detach(NULL, &ipl_cdevsw); 2260 #endif 2261 2262 /* 2263 * Grab the mutex, verify that there are no references 2264 * and that there are no running filters. If either 2265 * of these exists, reinsert our cdevsw entry and return 2266 * an error. 2267 */ 2268 mutex_enter(&ipf_ref_mutex); 2269 if (ipf_active != 0 || ipfmain.ipf_running > 0) { 2270 #ifdef _MODULE 2271 (void)devsw_attach("ipl", NULL, &ipl_bmaj, 2272 &ipl_cdevsw, &ipl_cmaj); 2273 #endif 2274 mutex_exit(&ipf_ref_mutex); 2275 return EBUSY; 2276 } 2277 2278 /* Clean up the rest of our state before being unloaded */ 2279 2280 mutex_exit(&ipf_ref_mutex); 2281 mutex_destroy(&ipf_ref_mutex); 2282 ipf_destroy_all(&ipfmain); 2283 ipf_unload_all(); 2284 kauth_unlisten_scope(ipf_listener); 2285 2286 return 0; 2287 } 2288 #endif /* (__NetBSD_Version__ >= 799003000) */ 2289